^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0 OR MIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Xen para-virtual sound device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2016-2018 EPAM Systems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <xen/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <xen/platform_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <xen/xenbus.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <xen/xen-front-pgdir-shbuf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <xen/interface/io/sndif.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "xen_snd_front.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "xen_snd_front_alsa.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "xen_snd_front_evtchnl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
/*
 * Reserve the next request slot on the shared ring and fill in the fields
 * common to all operations.
 *
 * The caller must hold evtchnl->ring_io_lock (all users in this file do),
 * since neither the ring producer index nor the evt_next_id/evt_id
 * bookkeeping is otherwise protected.
 *
 * Returns a pointer into the shared ring; the caller fills in the
 * operation-specific payload before dropping the lock and flushing.
 */
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
{
	struct xensnd_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	/*
	 * Remember the id of the request just queued so the response
	 * handler can match the backend's reply against it.
	 */
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
/*
 * Kick the backend: re-arm the completion used to wait for the response
 * and flush the request ring.
 *
 * Return: 0 on success, -EIO if the event channel is not connected.
 */
static int be_stream_do_io(struct xen_snd_front_evtchnl *evtchnl)
{
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	/* Re-init before flushing so an immediate response cannot be missed. */
	reinit_completion(&evtchnl->u.req.completion);
	xen_snd_front_evtchnl_flush(evtchnl);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) static int be_stream_wait_io(struct xen_snd_front_evtchnl *evtchnl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) if (wait_for_completion_timeout(&evtchnl->u.req.completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) msecs_to_jiffies(VSND_WAIT_BACK_MS)) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) return evtchnl->u.req.resp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
/*
 * Query the backend for the HW parameter ranges it supports for this
 * stream (XENSND_OP_HW_PARAM_QUERY).
 *
 * @evtchnl:       request event channel of the stream
 * @hw_param_req:  proposed parameter ranges sent to the backend
 * @hw_param_resp: on success, filled with the backend's response
 *
 * req_io_lock serializes the whole request/response transaction; the
 * inner ring_io_lock only protects preparation of the ring slot.
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT if the backend
 * did not answer in time, or the backend's response status.
 */
int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
					struct xensnd_query_hw_param *hw_param_req,
					struct xensnd_query_hw_param *hw_param_resp)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
	req->op.hw_param = *hw_param_req;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	if (ret == 0)
		*hw_param_resp = evtchnl->u.req.resp.hw_param;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
/*
 * Open the stream at the backend (XENSND_OP_OPEN) with the negotiated PCM
 * configuration and hand over the shared buffer.
 *
 * @evtchnl:   request event channel of the stream
 * @shbuf:     shared buffer descriptor; its grant-reference directory is
 *             passed to the backend
 * @format:    PCM sample format (XENSND_PCM_FORMAT_*)
 * @channels:  number of channels
 * @rate:      sample rate in Hz
 * @buffer_sz: total buffer size in bytes
 * @period_sz: period size in bytes
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT on backend
 * timeout, or the backend's response status.
 */
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
				 struct xen_front_pgdir_shbuf *shbuf,
				 u8 format, unsigned int channels,
				 unsigned int rate, u32 buffer_sz,
				 u32 period_sz)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
	req->op.open.pcm_format = format;
	req->op.open.pcm_channels = channels;
	req->op.open.pcm_rate = rate;
	req->op.open.buffer_sz = buffer_sz;
	req->op.open.period_sz = period_sz;
	req->op.open.gref_directory =
		xen_front_pgdir_shbuf_get_dir_start(shbuf);
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
/*
 * Close the stream at the backend (XENSND_OP_CLOSE).
 *
 * The close request carries no payload, so the slot returned by
 * be_stream_prepare_req() is intentionally left untouched (hence the
 * __always_unused annotation on req).
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT on backend
 * timeout, or the backend's response status.
 */
int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
{
	__always_unused struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
/*
 * Tell the backend that @count bytes at offset @pos of the shared buffer
 * are ready for playback (XENSND_OP_WRITE).
 *
 * @pos:   offset into the shared buffer, in bytes
 * @count: number of bytes written
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT on backend
 * timeout, or the backend's response status.
 */
int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
			       unsigned long pos, unsigned long count)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
	req->op.rw.length = count;
	req->op.rw.offset = pos;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
/*
 * Ask the backend to place @count captured bytes at offset @pos of the
 * shared buffer (XENSND_OP_READ).
 *
 * @pos:   offset into the shared buffer, in bytes
 * @count: number of bytes requested
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT on backend
 * timeout, or the backend's response status.
 */
int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
			      unsigned long pos, unsigned long count)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
	req->op.rw.length = count;
	req->op.rw.offset = pos;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
/*
 * Send a trigger request to the backend (XENSND_OP_TRIGGER), e.g.
 * start/stop/pause/resume of the stream.
 *
 * @type: trigger type (XENSND_OP_TRIGGER_* value)
 *
 * Return: 0 on success, -EIO if not connected, -ETIMEDOUT on backend
 * timeout, or the backend's response status.
 */
int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
				 int type)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
	req->op.trigger.type = type;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
/*
 * Tear down the frontend: remove the ALSA card first, then free the
 * event channels it was using.
 */
static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
	xen_snd_front_alsa_fini(front_info);
	xen_snd_front_evtchnl_free_all(front_info);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
/*
 * Backend entered InitWait: read the card configuration, create the
 * per-stream event channels and publish them on XenStore.
 *
 * Return: 0 on success, a negative errno from the first step that fails.
 */
static int sndback_initwait(struct xen_snd_front_info *front_info)
{
	int num_streams;
	int ret;

	ret = xen_snd_front_cfg_card(front_info, &num_streams);
	if (ret >= 0) {
		/* create event channels for all streams and publish */
		ret = xen_snd_front_evtchnl_create_all(front_info,
						       num_streams);
		if (ret >= 0)
			ret = xen_snd_front_evtchnl_publish_all(front_info);
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
/* Backend is connected: bring up the ALSA virtual sound card. */
static int sndback_connect(struct xen_snd_front_info *front_info)
{
	return xen_snd_front_alsa_init(front_info);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
/*
 * Handle backend disconnect: release all frontend resources and go back
 * to XenbusStateInitialising so the connection can be re-established.
 */
static void sndback_disconnect(struct xen_snd_front_info *front_info)
{
	xen_snd_drv_fini(front_info);
	xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
/*
 * XenBus .otherend_changed callback: drive the frontend state machine in
 * response to backend state transitions.
 */
static void sndback_changed(struct xenbus_device *xb_dev,
			    enum xenbus_state backend_state)
{
	struct xen_snd_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	dev_dbg(&xb_dev->dev, "Backend state is %s, front is %s\n",
		xenbus_strstate(backend_state),
		xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		/* Nothing for the frontend to do in these states. */
		break;

	case XenbusStateInitialising:
		/* Recovering after backend unexpected closure. */
		sndback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		/* Recovering after backend unexpected closure. */
		sndback_disconnect(front_info);

		ret = sndback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		/* Only connect once we have finished our own init. */
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = sndback_connect(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		/*
		 * In this state backend starts freeing resources,
		 * so let it go into closed state first, so we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		/* Already closed on our side — nothing more to do. */
		if (xb_dev->state == XenbusStateClosed)
			break;

		sndback_disconnect(front_info);
		break;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
/*
 * XenBus probe: allocate the per-device frontend context (device-managed,
 * freed automatically on unbind) and announce ourselves as Initialising
 * so the backend starts the handshake.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error from
 * xenbus_switch_state().
 */
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_snd_front_info *front_info;

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
/*
 * XenBus remove: announce Closing and wait (bounded) for the backend to
 * reach InitWait before freeing frontend resources.
 */
static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;	/* 100 polls x 10 ms = up to ~1 s of waiting */

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		/* Timed out: log the state the backend is stuck in. */
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		pr_err("Backend state is %s while removing driver\n",
		       xenbus_strstate(state));
	}

	/* Clean up regardless of whether the backend cooperated. */
	xen_snd_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
/* XenBus device ids this frontend binds to; empty string terminates. */
static const struct xenbus_device_id xen_drv_ids[] = {
	{ XENSND_DRIVER_NAME },
	{ "" }
};

/* XenBus frontend driver operations. */
static struct xenbus_driver xen_driver = {
	.ids = xen_drv_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = sndback_changed,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static int __init xen_drv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (!xen_has_pv_devices())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (XEN_PAGE_SIZE != PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) pr_err(XENSND_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) XEN_PAGE_SIZE, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) pr_info("Initialising Xen " XENSND_DRIVER_NAME " frontend driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) return xenbus_register_frontend(&xen_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
/* Module exit: unregister the XenBus frontend driver. */
static void __exit xen_drv_fini(void)
{
	pr_info("Unregistering Xen " XENSND_DRIVER_NAME " frontend driver\n");
	xenbus_unregister_driver(&xen_driver);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) module_init(xen_drv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) module_exit(xen_drv_fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) MODULE_DESCRIPTION("Xen virtual sound device frontend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");