^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * with Common Isochronous Packet (IEC 61883-1) headers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/firewire.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/firewire-constants.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <sound/pcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <sound/pcm_params.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "amdtp-stream.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #define TICKS_PER_CYCLE 3072
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define CYCLES_PER_SECOND 8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define OHCI_MAX_SECOND 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* Always support Linux tracing subsystem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "amdtp-stream-trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
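// 0x2e00 is 11776 ticks; at TICKS_PER_SECOND = 3072 * 8000 = 24576000 ticks/sec,
// 11776 / 24576000 sec is where the 479.17 usec figure above comes from.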
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /* isochronous header parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define ISO_DATA_LENGTH_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define TAG_NO_CIP_HEADER 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define TAG_CIP 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /* common isochronous packet header parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define CIP_EOH_SHIFT 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define CIP_EOH (1u << CIP_EOH_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define CIP_EOH_MASK 0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define CIP_SID_SHIFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define CIP_SID_MASK 0x3f000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define CIP_DBS_MASK 0x00ff0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define CIP_DBS_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define CIP_SPH_MASK 0x00000400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define CIP_SPH_SHIFT 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define CIP_DBC_MASK 0x000000ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define CIP_FMT_SHIFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define CIP_FMT_MASK 0x3f000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define CIP_FDF_MASK 0x00ff0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define CIP_FDF_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define CIP_SYT_MASK 0x0000ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define CIP_SYT_NO_INFO 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* Audio and Music transfer protocol specific parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define CIP_FMT_AM 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define AMDTP_FDF_NO_DATA 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) // For the iso header, tstamp and two quadlets of CIP header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define IR_CTX_HEADER_SIZE_CIP 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) // For iso header and tstamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define IR_CTX_HEADER_SIZE_NO_CIP 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define HEADER_TSTAMP_MASK 0x0000ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define IT_PKT_HEADER_SIZE_CIP 8 // For two quadlets of CIP header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static void pcm_period_work(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * amdtp_stream_init - initialize an AMDTP stream structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * @s: the AMDTP stream to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * @unit: the target of the stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * @dir: the direction of the stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * @flags: the packet transmission method to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * @fmt: the value of the fmt field in the CIP header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * @process_ctx_payloads: callback handler to process payloads of the isoc context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * @protocol_size: the size of the protocol-specific data to newly allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) enum amdtp_stream_direction dir, enum cip_flags flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) unsigned int fmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) unsigned int protocol_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) if (process_ctx_payloads == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) s->protocol = kzalloc(protocol_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) if (!s->protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) s->unit = unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) s->direction = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) s->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) s->context = ERR_PTR(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) mutex_init(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) INIT_WORK(&s->period_work, pcm_period_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) s->packet_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) init_waitqueue_head(&s->callback_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) s->callbacked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) s->fmt = fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) s->process_ctx_payloads = process_ctx_payloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (dir == AMDTP_OUT_STREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) s->ctx_data.rx.syt_override = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) EXPORT_SYMBOL(amdtp_stream_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * amdtp_stream_destroy - free stream resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * @s: the AMDTP stream to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) void amdtp_stream_destroy(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /* Not initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (s->protocol == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) WARN_ON(amdtp_stream_running(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) kfree(s->protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) mutex_destroy(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) EXPORT_SYMBOL(amdtp_stream_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) [CIP_SFC_32000] = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) [CIP_SFC_44100] = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) [CIP_SFC_48000] = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) [CIP_SFC_88200] = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) [CIP_SFC_96000] = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) [CIP_SFC_176400] = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) [CIP_SFC_192000] = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) EXPORT_SYMBOL(amdtp_syt_intervals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) [CIP_SFC_32000] = 32000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) [CIP_SFC_44100] = 44100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) [CIP_SFC_48000] = 48000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) [CIP_SFC_88200] = 88200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) [CIP_SFC_96000] = 96000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) [CIP_SFC_176400] = 176400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) [CIP_SFC_192000] = 192000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) EXPORT_SYMBOL(amdtp_rate_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct snd_pcm_hw_rule *rule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct snd_interval *s = hw_param_interval(params, rule->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) const struct snd_interval *r =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct snd_interval t = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) unsigned int step = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) for (i = 0; i < CIP_SFC_COUNT; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (snd_interval_test(r, amdtp_rate_table[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) step = max(step, amdtp_syt_intervals[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) t.min = roundup(s->min, step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) t.max = rounddown(s->max, step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) t.integer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) return snd_interval_refine(s, &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @s: the AMDTP stream, which must be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * @runtime: the PCM substream runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct snd_pcm_runtime *runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct snd_pcm_hardware *hw = &runtime->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) unsigned int ctx_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) unsigned int maximum_usec_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) hw->info = SNDRV_PCM_INFO_BATCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) SNDRV_PCM_INFO_BLOCK_TRANSFER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) SNDRV_PCM_INFO_INTERLEAVED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) SNDRV_PCM_INFO_JOINT_DUPLEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) SNDRV_PCM_INFO_MMAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) SNDRV_PCM_INFO_MMAP_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) /* SNDRV_PCM_INFO_BATCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) hw->periods_min = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) hw->periods_max = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /* bytes for a frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) hw->period_bytes_min = 4 * hw->channels_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /* Just to prevent allocating too many pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) hw->period_bytes_max = hw->period_bytes_min * 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
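// Illustration only: for a hypothetical device with channels_max = 2, this gives
// period_bytes_min = 8, period_bytes_max = 16 KiB and buffer_bytes_max = 32 KiB.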
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) // The Linux driver for the 1394 OHCI controller voluntarily flushes the isoc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) // context when the total size of accumulated context headers reaches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) // PAGE_SIZE. This kicks work for the isoc context and brings the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) // callback in the middle of scheduled interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) // Although AMDTP streams in the same domain use the same events per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) // IRQ, use the largest size of context header between IT/IR contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) // Here, the size of the context header in the IR context is used for both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) // contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (!(s->flags & CIP_NO_HEADER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) CYCLES_PER_SECOND / ctx_header_size;
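// A sketch of the arithmetic, assuming a 4 KiB PAGE_SIZE and a stream with CIP
// headers (ctx_header_size = 16): 1000000 * 4096 / 8000 / 16 = 32000 usec, which
// becomes the upper limit for the period time below.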
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) // In IEC 61883-6, one isoc packet can transfer events up to the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) // of the syt interval. This comes from the interval of the isoc cycle. As the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) // 1394 OHCI controller can generate a hardware IRQ per isoc packet, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) // interval is 125 usec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) // However, there are two ways of transmission in IEC 61883-6; blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) // and non-blocking modes. In blocking mode, the sequence of isoc packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) // includes 'empty' or 'NODATA' packets which include no events. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) // non-blocking mode, the number of events per packet is variable up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) // the syt interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) // Due to the above protocol design, the minimum number of PCM frames per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) // interrupt should be double the value of the syt interval, thus it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) // 250 usec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) err = snd_pcm_hw_constraint_minmax(runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) SNDRV_PCM_HW_PARAM_PERIOD_TIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 250, maximum_usec_per_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /* A non-blocking stream has no further constraints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (!(s->flags & CIP_BLOCKING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * One AMDTP packet can include several frames. In blocking mode, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * depending on the sampling rate. For accurate period interrupts, it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * preferable to align period/buffer sizes to the current SYT_INTERVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) apply_constraint_to_size, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) SNDRV_PCM_HW_PARAM_RATE, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) apply_constraint_to_size, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) SNDRV_PCM_HW_PARAM_RATE, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * amdtp_stream_set_parameters - set stream parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * @s: the AMDTP stream to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * @rate: the sample rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * @data_block_quadlets: the size of a data block in quadlet unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * The parameters must be set before the stream is started, and must not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * changed while the stream is running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) unsigned int data_block_quadlets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) unsigned int sfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (amdtp_rate_table[sfc] == rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) if (sfc == ARRAY_SIZE(amdtp_rate_table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) s->sfc = sfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) s->data_block_quadlets = data_block_quadlets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) s->syt_interval = amdtp_syt_intervals[sfc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) // default buffering in the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (s->direction == AMDTP_OUT_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) s->ctx_data.rx.transfer_delay =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) if (s->flags & CIP_BLOCKING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) // additional buffering needed to adjust for no-data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) // packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) s->ctx_data.rx.transfer_delay +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) TICKS_PER_SECOND * s->syt_interval / rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
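// For example: 0x2e00 - 3072 = 8704 ticks (~354.2 usec), and at 48000 Hz in
// blocking mode another 24576000 * 8 / 48000 = 4096 ticks (~166.7 usec) is added.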
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) EXPORT_SYMBOL(amdtp_stream_set_parameters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * amdtp_stream_get_max_payload - get the stream's packet size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * @s: the AMDTP stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * This function must not be called before the stream has been configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * with amdtp_stream_set_parameters().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) unsigned int multiplier = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) unsigned int cip_header_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (s->flags & CIP_JUMBO_PAYLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) multiplier = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (!(s->flags & CIP_NO_HEADER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) cip_header_size = sizeof(__be32) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
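// Illustration only: at 48000 Hz (syt_interval = 8) with a hypothetical data block
// of 10 quadlets, a stream with CIP headers yields 8 + 8 * 10 * 4 = 328 bytes;
// CIP_JUMBO_PAYLOAD multiplies the data part by 5.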
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return cip_header_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) EXPORT_SYMBOL(amdtp_stream_get_max_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * amdtp_stream_pcm_prepare - prepare PCM device for running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * @s: the AMDTP stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * This function should be called from the PCM device's .prepare callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) cancel_work_sync(&s->period_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) s->pcm_buffer_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) s->pcm_period_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static unsigned int calculate_data_blocks(unsigned int *data_block_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) bool is_blocking, bool is_no_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) unsigned int syt_interval, enum cip_sfc sfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) unsigned int data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /* Blocking mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (is_blocking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /* This module generates an empty packet for 'no data'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (is_no_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) data_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) data_blocks = syt_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /* Non-blocking mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (!cip_sfc_is_base_44100(sfc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) // Sample_rate / 8000 is an integer, and precomputed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) data_blocks = *data_block_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) unsigned int phase = *data_block_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * This calculates the number of data blocks per packet so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * 1) the overall rate is correct and exactly synchronized to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * the bus clock, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * 2) packets with a rounded-up number of blocks occur as early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * as possible in the sequence (to prevent underruns of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * device's buffer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) */
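// As a worked check of the 44.1 kHz case below: over its 80-cycle pattern,
// 441 data blocks are emitted in total, i.e. 441 / 80 = 5.5125 = 44100 / 8000
// data blocks per cycle on average, exactly matching the sample rate.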
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (sfc == CIP_SFC_44100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) /* 6 6 5 6 5 6 5 ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) data_blocks = 5 + ((phase & 1) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) (phase == 0 || phase >= 40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) data_blocks = 11 * (sfc >> 1) + (phase == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (++phase >= (80 >> (sfc >> 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) phase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) *data_block_state = phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) unsigned int *syt_offset_state, enum cip_sfc sfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) unsigned int syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) if (*last_syt_offset < TICKS_PER_CYCLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (!cip_sfc_is_base_44100(sfc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) syt_offset = *last_syt_offset + *syt_offset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * The time, in ticks, of the n'th SYT_INTERVAL sample is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * n * SYT_INTERVAL * 24576000 / sample_rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * Modulo TICKS_PER_CYCLE, the difference between successive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * elements is about 1386.23. Rounding the results of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * formula to the SYT precision results in a sequence of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * differences that begins with:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * This code generates _exactly_ the same sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) */
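// A quick check of the figures above, using the constants in this file: at
// 44100 Hz with SYT_INTERVAL = 8, successive timestamps are
// 8 * 24576000 / 44100 = 4458 + 10200/44100 ticks apart, i.e. about 1386.23
// modulo TICKS_PER_CYCLE. Since 147 * 10200 / 44100 = 34 exactly, the pattern
// repeats every 147 packets with 34 increments of 1387 and 113 of 1386.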
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) unsigned int phase = *syt_offset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) unsigned int index = phase % 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) syt_offset = *last_syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) syt_offset += 1386 + ((index && !(index & 3)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) phase == 146);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if (++phase >= 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) phase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) *syt_offset_state = phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) *last_syt_offset = syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (syt_offset >= TICKS_PER_CYCLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) syt_offset = CIP_SYT_NO_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) return syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) static void update_pcm_pointers(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct snd_pcm_substream *pcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) unsigned int frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) unsigned int ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ptr = s->pcm_buffer_pointer + frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (ptr >= pcm->runtime->buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) ptr -= pcm->runtime->buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) WRITE_ONCE(s->pcm_buffer_pointer, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) s->pcm_period_pointer += frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (s->pcm_period_pointer >= pcm->runtime->period_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) s->pcm_period_pointer -= pcm->runtime->period_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) queue_work(system_highpri_wq, &s->period_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static void pcm_period_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct amdtp_stream *s = container_of(work, struct amdtp_stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) period_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (pcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) snd_pcm_period_elapsed(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) bool sched_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) params->interrupt = sched_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) params->tag = s->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) params->sy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) s->buffer.packets[s->packet_index].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dev_err(&s->unit->device, "queueing error: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (++s->packet_index >= s->queue_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) s->packet_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static inline int queue_out_packet(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct fw_iso_packet *params, bool sched_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) params->skip =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) !!(params->header_length == 0 && params->payload_length == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) return queue_packet(s, params, sched_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static inline int queue_in_packet(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) struct fw_iso_packet *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) // Queue one packet for IR context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) params->header_length = s->ctx_data.tx.ctx_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) params->skip = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return queue_packet(s, params, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
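// Build the two CIP header quadlets as laid out by the masks above: quadlet 0
// carries SID, DBS, SPH and DBC; quadlet 1 carries EOH, FMT, FDF and SYT
// (IEC 61883-1).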
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned int data_block_counter, unsigned int syt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) (s->data_block_quadlets << CIP_DBS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) data_block_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) cip_header[1] = cpu_to_be32(CIP_EOH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) (syt & CIP_SYT_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct fw_iso_packet *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) unsigned int data_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) unsigned int data_block_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) unsigned int syt, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) unsigned int payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) __be32 *cip_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) params->payload_length = payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (!(s->flags & CIP_NO_HEADER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) cip_header = (__be32 *)params->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) generate_cip_header(s, cip_header, data_block_counter, syt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) params->header_length = 2 * sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) payload_length += params->header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) cip_header = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) data_block_counter, s->packet_index, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) unsigned int payload_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) unsigned int *data_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) unsigned int *data_block_counter, unsigned int *syt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) u32 cip_header[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) unsigned int sph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) unsigned int fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) unsigned int fdf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) unsigned int dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) bool lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) cip_header[0] = be32_to_cpu(buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) cip_header[1] = be32_to_cpu(buf[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * This module supports 'Two-quadlet CIP header with SYT field'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * For convenience, also check whether the FMT field is AM824 or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) dev_info_ratelimited(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) "Invalid CIP header for AMDTP: %08X:%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) cip_header[0], cip_header[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /* Check whether the protocol is valid or not. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (sph != s->sph || fmt != s->fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) dev_info_ratelimited(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) "Detect unexpected protocol: %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) cip_header[0], cip_header[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /* Calculate data blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (payload_length < sizeof(__be32) * 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) *data_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) unsigned int data_block_quadlets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /* avoid division by zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (data_block_quadlets == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) dev_err(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) "Detect invalid value in dbs field: %08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) cip_header[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (s->flags & CIP_WRONG_DBS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) data_block_quadlets = s->data_block_quadlets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
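// payload_length counts the two CIP header quadlets as well, hence the
// '- 2' below before dividing by the data block size in quadlets.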
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) *data_blocks = (payload_length / sizeof(__be32) - 2) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) data_block_quadlets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /* Check data block counter continuity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) dbc = cip_header[0] & CIP_DBC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) *data_block_counter != UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) dbc = *data_block_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) *data_block_counter == UINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) lost = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) lost = dbc != *data_block_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) unsigned int dbc_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) dbc_interval = s->ctx_data.tx.dbc_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) dbc_interval = *data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (lost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) dev_err(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) "Detect discontinuity of CIP: %02X %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) *data_block_counter, dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) *data_block_counter = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) *syt = cip_header[1] & CIP_SYT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) const __be32 *ctx_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) unsigned int *payload_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) unsigned int *data_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) unsigned int *data_block_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) unsigned int *syt, unsigned int packet_index, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) const __be32 *cip_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) unsigned int cip_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (!(s->flags & CIP_NO_HEADER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) cip_header_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) cip_header_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) dev_err(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) "Detect jumbo payload: %04x %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (cip_header_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) cip_header = ctx_header + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) err = check_cip_header(s, cip_header, *payload_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) data_blocks, data_block_counter, syt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) cip_header = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) *data_blocks = *payload_length / sizeof(__be32) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) s->data_block_quadlets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) *syt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (*data_block_counter == UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) *data_block_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) *data_block_counter, packet_index, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) // field. On the other hand, the DMA descriptors of 1394 OHCI use only 3 bits to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) // represent it. Thus, via the Linux firewire subsystem, we get just those 3 bits.
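// For example, a tstamp of 0x2345 decodes to second = 1 and cycle = 0x345 = 837,
// giving a cycle count of 1 * 8000 + 837 = 8837.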
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) cycle += addend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) // Align to the actual cycle count for the packet which is going to be scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) // This module queues the same number of isochronous cycles as the queue size to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) // skip isochronous cycles, therefore it's OK to just increment the cycle by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) // queue size for the scheduled cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) unsigned int queue_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) u32 cycle = compute_cycle_count(ctx_header_tstamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) return increment_cycle_count(cycle, queue_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) static int generate_device_pkt_descs(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct pkt_desc *descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) const __be32 *ctx_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) unsigned int packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) unsigned int dbc = s->data_block_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) unsigned int packet_index = s->packet_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) unsigned int queue_size = s->queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) for (i = 0; i < packets; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct pkt_desc *desc = descs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) unsigned int cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) unsigned int payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned int data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) unsigned int syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) cycle = compute_cycle_count(ctx_header[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) &data_blocks, &dbc, &syt, packet_index, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) desc->cycle = cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) desc->syt = syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) desc->data_blocks = data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) desc->data_block_counter = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) desc->ctx_payload = s->buffer.packets[packet_index].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (!(s->flags & CIP_DBC_IS_END_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) dbc = (dbc + desc->data_blocks) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ctx_header +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) packet_index = (packet_index + 1) % queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) s->data_block_counter = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
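// The SYT field packs the low 4 bits of the cycle carrying the event into bits
// 12-15 and the offset within that cycle (0..3071 ticks) into bits 0-11. For
// instance, cycle = 100 and syt_offset = 3500 with no transfer delay (a purely
// illustrative combination) give cycle 101 and offset 428, i.e. SYT = 0x51ac.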
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) unsigned int transfer_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) unsigned int syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) syt_offset += transfer_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) (syt_offset % TICKS_PER_CYCLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return syt & CIP_SYT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) const __be32 *ctx_header, unsigned int packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) const struct seq_desc *seq_descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) unsigned int seq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) unsigned int dbc = s->data_block_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) unsigned int seq_index = s->ctx_data.rx.seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) for (i = 0; i < packets; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct pkt_desc *desc = descs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) unsigned int index = (s->packet_index + i) % s->queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) const struct seq_desc *seq = seq_descs + seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned int syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) syt = seq->syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (syt != CIP_SYT_NO_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) syt = compute_syt(syt, desc->cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) s->ctx_data.rx.transfer_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) desc->syt = syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) desc->data_blocks = seq->data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
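// When CIP_DBC_IS_END_EVENT is set, the counter is advanced before it is
// recorded in the descriptor, so the transmitted DBC covers this packet's
// data blocks; otherwise it is advanced afterwards.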
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (s->flags & CIP_DBC_IS_END_EVENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dbc = (dbc + desc->data_blocks) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) desc->data_block_counter = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (!(s->flags & CIP_DBC_IS_END_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dbc = (dbc + desc->data_blocks) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) desc->ctx_payload = s->buffer.packets[index].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) seq_index = (seq_index + 1) % seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ++ctx_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) s->data_block_counter = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) s->ctx_data.rx.seq_index = seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
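/*
 * Stop further packet processing for the stream: mark the packet index as
 * invalid so that later callbacks return early, abort the PCM substream when
 * called from interrupt context, and report an XRUN via the PCM buffer
 * pointer.
 */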
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static inline void cancel_stream(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) s->packet_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (in_interrupt())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) amdtp_stream_pcm_abort(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
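/*
 * Let the unit-specific handler process the payload of each packet and, when
 * a PCM substream is attached, advance the PCM pointers by the number of
 * handled frames.
 */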
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static void process_ctx_payloads(struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) const struct pkt_desc *descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) unsigned int packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct snd_pcm_substream *pcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) unsigned int pcm_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) pcm = READ_ONCE(s->pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (pcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) update_pcm_pointers(s, pcm, pcm_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
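/*
 * Completion callback for an IT context: generate descriptors for the
 * completed packets, let their payloads be filled, then queue each packet
 * again with a freshly built CIP header. The IRQ target additionally counts
 * events to decide when the next hardware IRQ should be scheduled.
 */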
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) size_t header_length, void *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) void *private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct amdtp_stream *s = private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) const struct amdtp_domain *d = s->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) const __be32 *ctx_header = header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) unsigned int events_per_period = s->ctx_data.rx.events_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned int event_count = s->ctx_data.rx.event_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned int packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (s->packet_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) // Calculate the number of packets in the buffer and check for XRUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) packets = header_length / sizeof(*ctx_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) d->seq_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) process_ctx_payloads(s, s->pkt_descs, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) for (i = 0; i < packets; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) const struct pkt_desc *desc = s->pkt_descs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned int syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct fw_iso_packet params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) __be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) } template = { {0}, {0} };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) bool sched_irq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (s->ctx_data.rx.syt_override < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) syt = desc->syt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) syt = s->ctx_data.rx.syt_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) build_it_pkt_header(s, desc->cycle, &template.params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) desc->data_blocks, desc->data_block_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) syt, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (s == s->domain->irq_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) event_count += desc->data_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (event_count >= events_per_period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) event_count -= events_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sched_irq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (queue_out_packet(s, &template.params, sched_irq) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cancel_stream(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) s->ctx_data.rx.event_count = event_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
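/*
 * Completion callback for an IR context: parse the context headers of the
 * completed packets into descriptors, process their payloads unless parsing
 * reported -EAGAIN, then queue the same number of packets again.
 */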
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) size_t header_length, void *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) void *private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct amdtp_stream *s = private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) __be32 *ctx_header = header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) unsigned int packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (s->packet_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) // Calculate the number of packets in the buffer and check for XRUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) packets = header_length / s->ctx_data.tx.ctx_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (err != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) cancel_stream(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) process_ctx_payloads(s, s->pkt_descs, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) for (i = 0; i < packets; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct fw_iso_packet params = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (queue_in_packet(s, &params) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cancel_stream(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
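/*
 * Fill the ring of sequence descriptors in the domain so that every IT
 * context has at least the requested number of entries ahead of its own
 * sequence index. Each new entry gets an ideal syt_offset and data block
 * count for the sampling transfer frequency of the IRQ target.
 */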
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct amdtp_stream *irq_target = d->irq_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) unsigned int seq_tail = d->seq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) unsigned int seq_size = d->seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) unsigned int min_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct amdtp_stream *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) min_avail = d->seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) list_for_each_entry(s, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) unsigned int seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) unsigned int avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (s->direction == AMDTP_IN_STREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) seq_index = s->ctx_data.rx.seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) avail = d->seq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (seq_index > avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) avail += d->seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) avail -= seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (avail < min_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) min_avail = avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) while (min_avail < packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct seq_desc *desc = d->seq_descs + seq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) &d->syt_offset_state, irq_target->sfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) desc->data_blocks = calculate_data_blocks(&d->data_block_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) !!(irq_target->flags & CIP_BLOCKING),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) desc->syt_offset == CIP_SYT_NO_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) irq_target->syt_interval, irq_target->sfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ++seq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) seq_tail %= seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ++min_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) d->seq_tail = seq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
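/*
 * Completion callback for the IT context selected as IRQ target: pool enough
 * sequence descriptors for all IT contexts, handle the target's own packets,
 * then flush completions of the other running streams in the domain. Any
 * streaming error cancels every running stream.
 */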
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) size_t header_length, void *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) void *private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct amdtp_stream *irq_target = private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct amdtp_domain *d = irq_target->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) unsigned int packets = header_length / sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct amdtp_stream *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) // Record enough entries, with at least three extra cycles of margin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) pool_ideal_seq_descs(d, packets + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) out_stream_callback(context, tstamp, header_length, header, irq_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (amdtp_streaming_error(irq_target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) list_for_each_entry(s, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (s != irq_target && amdtp_stream_running(s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) fw_iso_context_flush_completions(s->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (amdtp_streaming_error(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (amdtp_stream_running(irq_target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) cancel_stream(irq_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) list_for_each_entry(s, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (amdtp_stream_running(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) cancel_stream(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) // This is executed only once, to select and install the proper callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void amdtp_stream_first_callback(struct fw_iso_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u32 tstamp, size_t header_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void *header, void *private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct amdtp_stream *s = private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) const __be32 *ctx_header = header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u32 cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * For an IR context, the first packet has arrived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * For an IT context, the hardware is prepared to transmit the first packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) s->callbacked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) wake_up(&s->callback_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) cycle = compute_cycle_count(ctx_header[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) context->callback.sc = in_stream_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) cycle = compute_it_cycle(*ctx_header, s->queue_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (s == s->domain->irq_target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) context->callback.sc = irq_target_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) context->callback.sc = out_stream_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) s->start_cycle = cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) context->callback.sc(context, tstamp, header_length, header, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * amdtp_stream_start - start transferring packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * @s: the AMDTP stream to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @channel: the isochronous channel on the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * @speed: firewire speed code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * @start_cycle: the isochronous cycle to start the context. Start immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * if a negative value is given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * @queue_size: the number of packets in the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * @idle_irq_interval: the interval, in packets, to schedule hardware IRQ during the initial state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * The stream cannot be started until it has been configured with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * device can be started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) int start_cycle, unsigned int queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) unsigned int idle_irq_interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) bool is_irq_target = (s == s->domain->irq_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) unsigned int ctx_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) unsigned int max_ctx_payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) enum dma_data_direction dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int type, tag, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mutex_lock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (WARN_ON(amdtp_stream_running(s) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) (s->data_block_quadlets < 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) err = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) // NOTE: An IT context should be used as the IRQ target for a constant IRQ interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (is_irq_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) s->data_block_counter = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) s->data_block_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) // Initialize the packet buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) max_ctx_payload_size = amdtp_stream_get_max_payload(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) type = FW_ISO_CONTEXT_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!(s->flags & CIP_NO_HEADER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) max_ctx_payload_size -= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) type = FW_ISO_CONTEXT_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ctx_header_size = 0; // No effect for IT context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (!(s->flags & CIP_NO_HEADER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) max_ctx_payload_size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) s->queue_size = queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) type, channel, speed, ctx_header_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) amdtp_stream_first_callback, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (IS_ERR(s->context)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) err = PTR_ERR(s->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (err == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_err(&s->unit->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) "no free stream on this controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) goto err_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) amdtp_stream_update(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) s->ctx_data.tx.ctx_header_size = ctx_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (s->flags & CIP_NO_HEADER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) s->tag = TAG_NO_CIP_HEADER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) s->tag = TAG_CIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (!s->pkt_descs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) goto err_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
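// Queue packets for the whole buffer. queue_in_packet()/queue_out_packet()
// advance s->packet_index, so the loop ends once the index wraps around to
// zero. On the IRQ target, a hardware IRQ is requested every
// idle_irq_interval packets.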
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) s->packet_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct fw_iso_packet params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) err = queue_in_packet(s, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) bool sched_irq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) params.header_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) params.payload_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (is_irq_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sched_irq = !((s->packet_index + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) idle_irq_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) err = queue_out_packet(s, &params, sched_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) goto err_pkt_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) } while (s->packet_index > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* NOTE: TAG1 matches CIP. This only affects an IR context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) tag = FW_ISO_CONTEXT_MATCH_TAG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) tag |= FW_ISO_CONTEXT_MATCH_TAG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) s->callbacked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) err = fw_iso_context_start(s->context, start_cycle, 0, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) goto err_pkt_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) mutex_unlock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) err_pkt_descs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) kfree(s->pkt_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) err_context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) fw_iso_context_destroy(s->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) s->context = ERR_PTR(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) err_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) iso_packets_buffer_destroy(&s->buffer, s->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) mutex_unlock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * @d: the AMDTP domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * @s: the AMDTP stream that transports the PCM data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * Returns the current buffer position, in frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct amdtp_stream *irq_target = d->irq_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (irq_target && amdtp_stream_running(irq_target)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) // This function is called either in the software IRQ context of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) // period_work or in process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) // When the software IRQ context was scheduled by the software IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) // context of the IT contexts, the queued packets have already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) // handled, so there is no need to flush the queue in the buffer again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) // When process context reaches here, some packets will already be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) // queued in the buffer. These packets should be handled immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) // to keep the granularity of the PCM pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) // Later, process context will sometimes schedule the software IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) // context of period_work. Then there is no need to flush the queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) // for the same reason as described above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (current_work() != &s->period_work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) // Queued packets should be processed without kernel preemption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) // to keep latency low against the bus cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) fw_iso_context_flush_completions(irq_target->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return READ_ONCE(s->pcm_buffer_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * @d: the AMDTP domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * @s: the AMDTP stream that transfers the PCM frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Returns zero always.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct amdtp_stream *irq_target = d->irq_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) // Process isochronous packets for the recent isochronous cycles to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) // handle queued PCM frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (irq_target && amdtp_stream_running(irq_target)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) // Queued packets should be processed without kernel preemption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) // to keep latency low against the bus cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) fw_iso_context_flush_completions(irq_target->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * amdtp_stream_update - update the stream after a bus reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * @s: the AMDTP stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) void amdtp_stream_update(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /* Precomputing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) WRITE_ONCE(s->source_node_id_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) EXPORT_SYMBOL(amdtp_stream_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * amdtp_stream_stop - stop sending packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * @s: the AMDTP stream to stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * All PCM and MIDI devices of the stream must be stopped before the stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * itself can be stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void amdtp_stream_stop(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) mutex_lock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!amdtp_stream_running(s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) mutex_unlock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) cancel_work_sync(&s->period_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) fw_iso_context_stop(s->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) fw_iso_context_destroy(s->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) s->context = ERR_PTR(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) iso_packets_buffer_destroy(&s->buffer, s->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) kfree(s->pkt_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) s->callbacked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) mutex_unlock(&s->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * amdtp_stream_pcm_abort - abort the running PCM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * @s: the AMDTP stream about to be stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * If the isochronous stream needs to be stopped asynchronously, call this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * function first to stop the PCM device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) void amdtp_stream_pcm_abort(struct amdtp_stream *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct snd_pcm_substream *pcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) pcm = READ_ONCE(s->pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (pcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) snd_pcm_stop_xrun(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) EXPORT_SYMBOL(amdtp_stream_pcm_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * amdtp_domain_init - initialize an AMDTP domain structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * @d: the AMDTP domain to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int amdtp_domain_init(struct amdtp_domain *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) INIT_LIST_HEAD(&d->streams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) d->events_per_period = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) d->seq_descs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) EXPORT_SYMBOL_GPL(amdtp_domain_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * amdtp_domain_destroy - destroy an AMDTP domain structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * @d: the AMDTP domain to destroy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) void amdtp_domain_destroy(struct amdtp_domain *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) // At present nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * amdtp_domain_add_stream - register isoc context into the domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * @d: the AMDTP domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @s: the AMDTP stream.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * @channel: the isochronous channel on the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * @speed: firewire speed code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int channel, int speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct amdtp_stream *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) list_for_each_entry(tmp, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (s == tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) list_add(&s->list, &d->streams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) s->channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) s->speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) s->domain = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
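/*
 * Read the CYCLE_TIME register of the local node. The cycle offset field is
 * dropped from the returned value: the lower 13 bits are the cycle count and
 * the bits above them are the second count.
 */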
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) int generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) int rcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) __be32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) // This is a request to the local 1394 OHCI controller and is expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) // to complete without waiting for any event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) generation = fw_card->generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) smp_rmb(); // node_id vs. generation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) fw_card->node_id, generation, SCODE_100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) CSR_REGISTER_BASE + CSR_CYCLE_TIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) &reg, sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (rcode != RCODE_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) data = be32_to_cpu(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) *cur_cycle = data >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * amdtp_domain_start - start sending packets for isoc context in the domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * @d: the AMDTP domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * @ir_delay_cycle: the cycle delay to start all IR contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) unsigned int data_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) unsigned int syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) } *entry, initial_state[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) [CIP_SFC_32000] = { 4, 3072 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) [CIP_SFC_48000] = { 6, 1024 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) [CIP_SFC_96000] = { 12, 1024 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) [CIP_SFC_192000] = { 24, 1024 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) [CIP_SFC_44100] = { 0, 67 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) [CIP_SFC_88200] = { 0, 67 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) [CIP_SFC_176400] = { 0, 67 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) unsigned int events_per_buffer = d->events_per_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) unsigned int events_per_period = d->events_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) unsigned int idle_irq_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unsigned int queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct amdtp_stream *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) // Select an IT context as IRQ target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) list_for_each_entry(s, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (s->direction == AMDTP_OUT_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) d->irq_target = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) // This is the case in which the AMDTP streams in the domain run just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) // for MIDI substreams. Use the number of events equivalent to 10 msec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) // as the interval of hardware IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (events_per_period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (events_per_buffer == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) events_per_buffer = events_per_period * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) amdtp_rate_table[d->irq_target->sfc]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (!d->seq_descs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) d->seq_size = queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) d->seq_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) entry = &initial_state[s->sfc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) d->data_block_state = entry->data_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) d->syt_offset_state = entry->syt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) d->last_syt_offset = TICKS_PER_CYCLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (ir_delay_cycle > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct fw_card *fw_card = fw_parent_device(s->unit)->card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err = get_current_cycle_time(fw_card, &cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) // No need to care about overflow of the cycle field because it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) // enough width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) cycle += ir_delay_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) // Round up to sec field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) unsigned int sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) // The sec field can overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) sec = (cycle & 0xffffe000) >> 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) cycle = (++sec << 13) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ((cycle & 0x00001fff) % CYCLES_PER_SECOND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) // In the OHCI 1394 specification, only the lower 2 bits are available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) // for the sec field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) cycle &= 0x00007fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) cycle = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) list_for_each_entry(s, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int cycle_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (s->direction == AMDTP_IN_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) cycle_match = cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) // IT context starts immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) cycle_match = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) s->ctx_data.rx.seq_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (s != d->irq_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) err = amdtp_stream_start(s, s->channel, s->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) cycle_match, queue_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) s = d->irq_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) s->ctx_data.rx.events_per_period = events_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) s->ctx_data.rx.event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) s->ctx_data.rx.seq_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) amdtp_rate_table[d->irq_target->sfc]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) idle_irq_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) list_for_each_entry(s, &d->streams, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) amdtp_stream_stop(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) kfree(d->seq_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) d->seq_descs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) EXPORT_SYMBOL_GPL(amdtp_domain_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * @d: the AMDTP domain to which the isoc contexts belong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) void amdtp_domain_stop(struct amdtp_domain *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct amdtp_stream *s, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (d->irq_target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) amdtp_stream_stop(d->irq_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) list_for_each_entry_safe(s, next, &d->streams, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) list_del(&s->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (s != d->irq_target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) amdtp_stream_stop(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) d->events_per_period = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) d->irq_target = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) kfree(d->seq_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) d->seq_descs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) EXPORT_SYMBOL_GPL(amdtp_domain_stop);