// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses the state field of the header it transmits
 * (tx.channel->tx.state) to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts: the first is
 * written only through the tx.channel pointer, while the second is written
 * only through the rx.channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in non-cache
 * coherent implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
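
/*
 * For orientation, each channel region used below is a tegra_ivc_header
 * immediately followed by its frame buffers (see tegra_ivc_frame_virt() and
 * tegra_ivc_frame_phys()). An illustrative sketch with hypothetical numbers,
 * num_frames = 2 and frame_size = 64:
 *
 *	offset   0: tx union of the header (count, state)
 *	offset  64: rx union of the header (count)
 *	offset 128: frame 0
 *	offset 192: frame 1
 *
 * Each endpoint works with two such regions, one it transmits into and one
 * it receives from, with the rx/tx roles reversed on the remote end.
 */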

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}
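
/*
 * A note on the counter arithmetic above (an illustrative example, not code
 * used by this driver): tx.count and rx.count are free-running u32 values,
 * so the unsigned subtraction tx - rx yields the number of frames in flight
 * even across 32-bit wrap-around. For instance, with num_frames == 4:
 *
 *	u32 tx = 0x00000001;	// transmitter has wrapped past 2^32
 *	u32 rx = 0xffffffff;	// receiver is two frames behind
 *
 *	tx - rx == 2, so the queue holds two frames: neither empty nor full.
 *
 * Only a difference greater than num_frames, which a well-behaved peer can
 * never produce, is treated as corrupt, as checked above.
 */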

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
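
/*
 * A typical zero-copy receive loop built on the two functions above (an
 * illustrative sketch of a hypothetical consumer, not part of this driver;
 * process_message() stands in for whatever the client does with a frame):
 *
 *	for (;;) {
 *		void *frame = tegra_ivc_read_get_next_frame(ivc);
 *
 *		if (IS_ERR(frame))
 *			break;	// -ENOSPC: empty, -ECONNRESET: needs reset
 *
 *		process_message(frame, ivc->frame_size);
 *		tegra_ivc_read_advance(ivc);
 *	}
 *
 * The frame must be consumed (or copied out) before calling
 * tegra_ivc_read_advance(), since advancing releases it back to the
 * transmitter.
 */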

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
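
/*
 * The transmit side mirrors the receive loop (again an illustrative sketch
 * of a hypothetical producer; msg and len are assumed caller-provided):
 *
 *	void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *	if (IS_ERR(frame))
 *		return PTR_ERR(frame);	// -ENOSPC: full, -ECONNRESET: reset
 *
 *	memcpy(frame, msg, min(len, ivc->frame_size));
 *	tegra_ivc_write_advance(ivc);
 *
 * The frame contents must be fully written before the advance, which
 * publishes the frame to the receiver and invokes the notify callback if
 * the queue was previously empty.
 */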

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * ===============================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * ===============================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
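
/*
 * Channel establishment ties tegra_ivc_reset() and tegra_ivc_notified()
 * together. A sketch of how a client might drive the handshake (illustrative
 * only; how the notification is delivered and waited for is up to the
 * caller):
 *
 *	tegra_ivc_reset(ivc);
 *
 *	// in the notification handler (e.g. a doorbell interrupt):
 *	err = tegra_ivc_notified(ivc);
 *	if (err == -EAGAIN)
 *		return;	// still synchronizing, wait for the next notification
 *
 *	// err == 0: both ends reached TEGRA_IVC_STATE_ESTABLISHED and the
 *	// frame transfer functions will now succeed
 */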

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
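
/*
 * Worked example (illustrative numbers): with num_frames = 16 and
 * frame_size = 128, the queue proper is 16 * 128 = 2048 bytes, and
 * sizeof(struct tegra_ivc_header) adds 2 * TEGRA_IVC_ALIGN = 128 bytes,
 * so tegra_ivc_total_queue_size(2048) returns 2176. Each endpoint needs
 * two such regions, one per direction.
 */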

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
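
/*
 * A minimal setup sketch (illustrative only; my_notify() and the rx_base/
 * tx_base/rx_phys/tx_phys memory are hypothetical caller-provided values,
 * e.g. carved out of a shared-memory region sized with
 * tegra_ivc_total_queue_size()):
 *
 *	static void my_notify(struct tegra_ivc *ivc, void *data)
 *	{
 *		// ring a doorbell/mailbox toward the remote processor
 *	}
 *
 *	err = tegra_ivc_init(&ivc, NULL, rx_base, rx_phys, tx_base, tx_phys,
 *			     16, 128, my_notify, NULL);
 *	if (err < 0)
 *		return err;
 *
 *	tegra_ivc_reset(&ivc);
 *
 * With a NULL peer the caller is responsible for providing coherent (or
 * pre-mapped) memory; passing a device instead makes this file map the
 * queues itself and use the DMA API for cache maintenance.
 */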

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);