^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * System Control and Management Interface (SCMI) Message Protocol driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * SCMI Message Protocol is used between the System Control Processor(SCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * and the Application Processors(AP). The Message Handling Unit(MHU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * provides a mechanism for inter-processor communication between SCP's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Cortex M3 and AP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * SCP offers control and management of the core/cluster power states,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * various power domain DVFS including the core/cluster, certain system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * clocks configuration, thermal sensors and many others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Copyright (C) 2018-2020 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/refcount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "notify.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <trace/events/scmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #undef CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <trace/hooks/scmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) enum scmi_error_codes {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) SCMI_SUCCESS = 0, /* Success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) SCMI_ERR_SUPPORT = -1, /* Not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) SCMI_ERR_ENTRY = -4, /* Not found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) SCMI_ERR_RANGE = -5, /* Value out of range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) SCMI_ERR_BUSY = -6, /* Device busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) SCMI_ERR_COMMS = -7, /* Communication Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) SCMI_ERR_GENERIC = -8, /* Generic Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) SCMI_ERR_HARDWARE = -9, /* Hardware Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* List of all SCMI devices active in system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static LIST_HEAD(scmi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /* Protection for the entire list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static DEFINE_MUTEX(scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* Track the unique id for the transfers for debug & profiling purpose */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static atomic_t transfer_last_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static DEFINE_IDR(scmi_requested_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) static DEFINE_MUTEX(scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) struct scmi_requested_dev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) const struct scmi_device_id *id_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * struct scmi_xfers_info - Structure to manage transfer information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * @xfer_block: Preallocated Message array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * @xfer_alloc_table: Bitmap table for allocated messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * Index of this bitmap table is also used for message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * sequence identifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * @xfer_lock: Protection for message allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct scmi_xfers_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct scmi_xfer *xfer_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) unsigned long *xfer_alloc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) spinlock_t xfer_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * struct scmi_protocol_instance - Describe an initialized protocol instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * @handle: Reference to the SCMI handle associated to this protocol instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * @proto: A reference to the protocol descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * @gid: A reference for per-protocol devres management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * @users: A refcount to track effective users of this protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * @priv: Reference for optional protocol private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * @ph: An embedded protocol handle that will be passed down to protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * initialization code to identify this instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * Each protocol is initialized independently once for each SCMI platform in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * which is defined by DT and implemented by the SCMI server fw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) struct scmi_protocol_instance {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) const struct scmi_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) const struct scmi_protocol *proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void *gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) refcount_t users;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct scmi_protocol_handle ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * struct scmi_info - Structure representing a SCMI instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * @dev: Device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * @desc: SoC description for this instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * @version: SCMI revision information containing protocol version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * implementation version and (sub-)vendor identification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * @handle: Instance of SCMI handle to send to clients
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * @tx_minfo: Universal Transmit Message management info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * @rx_minfo: Universal Receive Message management info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * @tx_idr: IDR object to map protocol id to Tx channel info pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * @rx_idr: IDR object to map protocol id to Rx channel info pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * @protocols: IDR for protocols' instance descriptors initialized for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * this SCMI instance: populated on protocol's first attempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * usage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * @protocols_mtx: A mutex to protect protocols instances initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * @protocols_imp: List of protocols implemented, currently maximum of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * MAX_PROTOCOLS_IMP elements allocated by the base protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * @active_protocols: IDR storing device_nodes for protocols actually defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * in the DT and confirmed as implemented by fw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * @notify_priv: Pointer to private data structure specific to notifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * @node: List head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * @users: Number of users of this instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct scmi_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) const struct scmi_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct scmi_revision_info version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct scmi_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) struct scmi_xfers_info tx_minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct scmi_xfers_info rx_minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct idr tx_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct idr rx_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct idr protocols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /* Ensure mutual exclusive access to protocols instance array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct mutex protocols_mtx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) u8 *protocols_imp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct idr active_protocols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) void *notify_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) int users;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static const int scmi_linux_errmap[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /* better than switch case as long as return value is continuous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 0, /* SCMI_SUCCESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) -EINVAL, /* SCMI_ERR_PARAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) -EACCES, /* SCMI_ERR_ACCESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) -ENOENT, /* SCMI_ERR_ENTRY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) -ERANGE, /* SCMI_ERR_RANGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) -EBUSY, /* SCMI_ERR_BUSY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) -ECOMM, /* SCMI_ERR_COMMS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) -EIO, /* SCMI_ERR_GENERIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) -EREMOTEIO, /* SCMI_ERR_HARDWARE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) -EPROTO, /* SCMI_ERR_PROTOCOL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) static inline int scmi_to_linux_errno(int errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) int err_idx = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) return scmi_linux_errmap[err_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * scmi_dump_header_dbg() - Helper to dump a message header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * @dev: Device pointer corresponding to the SCMI entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * @hdr: pointer to header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static inline void scmi_dump_header_dbg(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct scmi_msg_hdr *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) hdr->id, hdr->seq, hdr->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) void scmi_set_notification_instance_data(const struct scmi_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) info->notify_priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) /* Ensure updated protocol private date are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) void *scmi_get_notification_instance_data(const struct scmi_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /* Ensure protocols_private_data has been updated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return info->notify_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * scmi_xfer_get() - Allocate one message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * @handle: Pointer to SCMI entity handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * @minfo: Pointer to Tx/Rx Message management info based on channel type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * Helper function which is used by various message functions that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * exposed to clients of this driver for allocating a message traffic event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * This function can sleep depending on pending requests already in the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * for the SCMI entity. Further, this also holds a spinlock to maintain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * integrity of internal data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * Return: 0 if all went fine, else corresponding error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct scmi_xfers_info *minfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) u16 xfer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) struct scmi_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) unsigned long flags, bit_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) /* Keep the locked section as small as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) spin_lock_irqsave(&minfo->xfer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) info->desc->max_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (bit_pos == info->desc->max_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) spin_unlock_irqrestore(&minfo->xfer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) set_bit(bit_pos, minfo->xfer_alloc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) spin_unlock_irqrestore(&minfo->xfer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) xfer_id = bit_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) xfer = &minfo->xfer_block[xfer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) xfer->hdr.seq = xfer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) reinit_completion(&xfer->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) xfer->transfer_id = atomic_inc_return(&transfer_last_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * __scmi_xfer_put() - Release a message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * @minfo: Pointer to Tx/Rx Message management info based on channel type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * @xfer: message that was reserved by scmi_xfer_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * This holds a spinlock to maintain integrity of internal data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * Keep the locked section as small as possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * NOTE: we might escape with smp_mb and no lock here..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * but just be conservative and symmetric.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) spin_lock_irqsave(&minfo->xfer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) spin_unlock_irqrestore(&minfo->xfer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct scmi_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct device *dev = cinfo->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct scmi_xfers_info *minfo = &info->rx_minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) ktime_t ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) ts = ktime_get_boottime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) xfer = scmi_xfer_get(cinfo->handle, minfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) dev_err(dev, "failed to get free message slot (%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) PTR_ERR(xfer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) info->desc->ops->clear_channel(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) unpack_scmi_header(msg_hdr, &xfer->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) scmi_dump_header_dbg(dev, &xfer->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) xfer->hdr.protocol_id, xfer->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) MSG_TYPE_NOTIFICATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) __scmi_xfer_put(minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) info->desc->ops->clear_channel(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static void scmi_handle_response(struct scmi_chan_info *cinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) u16 xfer_id, u8 msg_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct scmi_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct device *dev = cinfo->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct scmi_xfers_info *minfo = &info->tx_minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) /* Are we even expecting this? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) dev_err(dev, "message for %d is not expected!\n", xfer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) info->desc->ops->clear_channel(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) xfer = &minfo->xfer_block[xfer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * Even if a response was indeed expected on this slot at this point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) * a buggy platform could wrongly reply feeding us an unexpected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * delayed response we're not prepared to handle: bail-out safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * blaming firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) "Delayed Response for %d not expected! Buggy F/W ?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) xfer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) info->desc->ops->clear_channel(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) /* It was unexpected, so nobody will clear the xfer if not us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) __scmi_xfer_put(minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (msg_type == MSG_TYPE_DELAYED_RESP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) xfer->rx.len = info->desc->max_msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) scmi_dump_header_dbg(dev, &xfer->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) info->desc->ops->fetch_response(cinfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) xfer->hdr.protocol_id, xfer->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) msg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (msg_type == MSG_TYPE_DELAYED_RESP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) info->desc->ops->clear_channel(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) complete(xfer->async_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) complete(&xfer->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * scmi_rx_callback() - callback for receiving messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * @cinfo: SCMI channel info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * @msg_hdr: Message header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * Processes one received message to appropriate transfer information and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * signals completion of the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * NOTE: This function will be invoked in IRQ context, hence should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * as optimal as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) switch (msg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) case MSG_TYPE_NOTIFICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) scmi_handle_notification(cinfo, msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) case MSG_TYPE_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) case MSG_TYPE_DELAYED_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) scmi_handle_response(cinfo, xfer_id, msg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * xfer_put() - Release a transmit message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * @ph: Pointer to SCMI protocol handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * @xfer: message that was reserved by scmi_xfer_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static void xfer_put(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct scmi_xfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct scmi_info *info = handle_to_scmi_info(pi->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) __scmi_xfer_put(&info->tx_minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) struct scmi_xfer *xfer, ktime_t stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return info->desc->ops->poll_done(cinfo, xfer) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) ktime_after(ktime_get(), stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * do_xfer() - Do one transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * @ph: Pointer to SCMI protocol handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) * @xfer: Transfer to initiate and wait for response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * Return: -ETIMEDOUT in case of no response, if transmit error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * return corresponding error, else if all goes well,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * return 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static int do_xfer(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct scmi_xfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct scmi_info *info = handle_to_scmi_info(pi->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) struct device *dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) struct scmi_chan_info *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * Re-instate protocol id here from protocol handle so that cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * overridden by mistake (or malice) by the protocol code mangling with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * the scmi_xfer structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) xfer->hdr.protocol_id = pi->proto->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (unlikely(!cinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) xfer->hdr.protocol_id, xfer->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) xfer->hdr.poll_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) ret = info->desc->ops->send_message(cinfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) dev_dbg(dev, "Failed to send message %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (xfer->hdr.poll_completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (ktime_before(ktime_get(), stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) info->desc->ops->fetch_response(cinfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /* And we wait for the response. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) trace_android_vh_scmi_timeout_sync(&timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (!wait_for_completion_timeout(&xfer->done, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) dev_err(dev, "timed out in resp(caller: %pS)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (!ret && xfer->hdr.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ret = scmi_to_linux_errno(xfer->hdr.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (info->desc->ops->mark_txdone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) info->desc->ops->mark_txdone(cinfo, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) xfer->hdr.protocol_id, xfer->hdr.seq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) struct scmi_xfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct scmi_info *info = handle_to_scmi_info(pi->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) xfer->rx.len = info->desc->max_msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * do_xfer_with_response() - Do one transfer and wait until the delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * response is received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * @ph: Pointer to SCMI protocol handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * @xfer: Transfer to initiate and wait for response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * return corresponding error, else if all goes well, return 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct scmi_xfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) DECLARE_COMPLETION_ONSTACK(async_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) xfer->hdr.protocol_id = pi->proto->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) xfer->async_done = &async_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) ret = do_xfer(ph, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (!wait_for_completion_timeout(xfer->async_done, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) else if (xfer->hdr.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) ret = scmi_to_linux_errno(xfer->hdr.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) xfer->async_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * xfer_get_init() - Allocate and initialise one message for transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * @ph: Pointer to SCMI protocol handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * @msg_id: Message identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * @tx_size: transmit message size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * @rx_size: receive message size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * @p: pointer to the allocated and initialised message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * This function allocates the message using @scmi_xfer_get and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * initialise the header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * Return: 0 if all went fine with @p pointing to message, else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * corresponding error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static int xfer_get_init(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) u8 msg_id, size_t tx_size, size_t rx_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct scmi_xfer **p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) struct scmi_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct scmi_info *info = handle_to_scmi_info(pi->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct scmi_xfers_info *minfo = &info->tx_minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct device *dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /* Ensure we have sane transfer sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (rx_size > info->desc->max_msg_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) tx_size > info->desc->max_msg_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) xfer = scmi_xfer_get(pi->handle, minfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dev_err(dev, "failed to get free message slot(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) xfer->tx.len = tx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) xfer->rx.len = rx_size ? : info->desc->max_msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) xfer->hdr.id = msg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) xfer->hdr.protocol_id = pi->proto->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) xfer->hdr.poll_completion = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) *p = xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * version_get() - command to get the revision of the SCMI entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * @ph: Pointer to SCMI protocol handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) * @version: Holds returned version of protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * Updates the SCMI information in the internal data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) __le32 *rev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) ret = do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) rev_info = t->rx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) *version = le32_to_cpu(*rev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * scmi_set_protocol_priv - Set protocol specific data at init time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * @ph: A reference to the protocol handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * @priv: The private data to set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * Return: 0 on Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) pi->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * scmi_get_protocol_priv - Set protocol specific data at init time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * @ph: A reference to the protocol handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * Return: Protocol private data if any was set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return pi->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) static const struct scmi_xfer_ops xfer_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) .version_get = version_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) .xfer_get_init = xfer_get_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) .reset_rx_to_maxsz = reset_rx_to_maxsz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) .do_xfer = do_xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) .do_xfer_with_response = do_xfer_with_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) .xfer_put = xfer_put,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * scmi_get_revision_area - Retrieve version memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * @ph: A reference to the protocol handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * A helper to grab the version memory area reference during SCMI Base protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * Return: A reference to the version memory area associated to the SCMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * instance underlying this protocol handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) struct scmi_revision_info *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) scmi_get_revision_area(const struct scmi_protocol_handle *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) return pi->handle->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * scmi_get_protocol_instance - Protocol initialization helper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * @handle: A reference to the SCMI platform instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * In case the required protocol has never been requested before for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * instance, allocate and initialize all the needed structures while handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * resource allocation with a dedicated per-protocol devres subgroup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * Return: A reference to an initialized protocol instance or error on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static struct scmi_protocol_instance * __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) void *gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct scmi_protocol_instance *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) mutex_lock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) pi = idr_find(&info->protocols, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) refcount_inc(&pi->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) const struct scmi_protocol *proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /* Fail if protocol not registered on bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) proto = scmi_get_protocol(protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (!proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* Protocol specific devres group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!pi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) goto clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) pi->gid = gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) pi->proto = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) pi->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) pi->ph.dev = handle->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) pi->ph.xops = &xfer_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) pi->ph.set_priv = scmi_set_protocol_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) pi->ph.get_priv = scmi_get_protocol_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) refcount_set(&pi->users, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* proto->init is assured NON NULL by scmi_protocol_register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ret = pi->proto->init_instance(&pi->ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) goto clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ret = idr_alloc(&info->protocols, pi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) protocol_id, protocol_id + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (ret != protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) goto clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (pi->proto->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) scmi_register_protocol_events(handle, pi->proto->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) &pi->ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) pi->proto->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) devres_close_group(handle->dev, pi->gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) dev_dbg(handle->dev, "Initialized protocol: 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) mutex_unlock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) scmi_put_protocol(protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) devres_release_group(handle->dev, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) mutex_unlock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * scmi_acquire_protocol - Protocol acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @handle: A reference to the SCMI platform instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * Register a new user for the requested protocol on the specified SCMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * platform instance, possibly triggering its initialization on first user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * Return: 0 if protocol was acquired successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) int scmi_acquire_protocol(const struct scmi_handle *handle, u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * scmi_release_protocol - Protocol de-initialization helper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * @handle: A reference to the SCMI platform instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * Remove one user for the specified protocol and triggers de-initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * and resources de-allocation once the last user has gone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void scmi_release_protocol(const struct scmi_handle *handle, u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct scmi_protocol_instance *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) mutex_lock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) pi = idr_find(&info->protocols, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (WARN_ON(!pi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (refcount_dec_and_test(&pi->users)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) void *gid = pi->gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (pi->proto->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) scmi_deregister_protocol_events(handle, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (pi->proto->deinit_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) pi->proto->deinit_instance(&pi->ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) idr_remove(&info->protocols, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) scmi_put_protocol(protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) devres_release_group(handle->dev, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mutex_unlock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) u8 *prot_imp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) const struct scmi_protocol_instance *pi = ph_to_pi(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct scmi_info *info = handle_to_scmi_info(pi->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) info->protocols_imp = prot_imp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct scmi_info *info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!info->protocols_imp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (info->protocols_imp[i] == prot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct scmi_protocol_devres {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) const struct scmi_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u8 protocol_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static void scmi_devm_release_protocol(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct scmi_protocol_devres *dres = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) scmi_release_protocol(dres->handle, dres->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static struct scmi_protocol_instance __must_check *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) __scmi_devres_get_protocol_instance(struct scmi_device *sdev, u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct scmi_protocol_devres *dres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct scmi_protocol_instance *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dres = devres_alloc(scmi_devm_release_protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sizeof(*dres), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!dres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (IS_ERR(pi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) devres_free(dres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dres->handle = sdev->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dres->protocol_id = protocol_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) devres_add(&sdev->dev, dres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * scmi_devm_get_protocol - Devres managed get protocol operations and handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * @sdev: A reference to an scmi_device whose embedded struct device is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * be used for devres accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * @ph: A pointer reference used to pass back the associated protocol handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * Get hold of a protocol accounting for its usage, eventually triggering its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * initialization, and returning the protocol specific operations and related
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * protocol handle which will be used as first argument in most of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * protocols operations methods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * Being a devres based managed method, protocol hold will be automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * released, and possibly de-initialized on last user, once the SCMI driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * owning the scmi_device is unbound from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Return: A reference to the requested protocol operations or error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * Must be checked for errors by caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static const void __must_check *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) scmi_devm_get_protocol(struct scmi_device *sdev, u8 protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct scmi_protocol_handle **ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct scmi_protocol_instance *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (IS_ERR(pi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) *ph = &pi->ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return pi->proto->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * scmi_devm_acquire_protocol - Devres managed helper to get hold of a protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * @sdev: A reference to an scmi_device whose embedded struct device is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * be used for devres accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Get hold of a protocol accounting for its usage, possibly triggering its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * initialization but without getting access to its protocol specific operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * and handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Being a devres based managed method, protocol hold will be automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * released, and possibly de-initialized on last user, once the SCMI driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * owning the scmi_device is unbound from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * Return: 0 on SUCCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static int __must_check scmi_devm_acquire_protocol(struct scmi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct scmi_protocol_instance *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (IS_ERR(pi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return PTR_ERR(pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct scmi_protocol_devres *dres = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (WARN_ON(!dres || !data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return dres->protocol_id == *((u8 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * scmi_devm_put_protocol - Devres managed put protocol operations and handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * @sdev: A reference to an scmi_device whose embedded struct device is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * be used for devres accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * @protocol_id: The protocol being requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * Explicitly release a protocol hold previously obtained calling the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * @scmi_devm_get_protocol_ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void scmi_devm_put_protocol(struct scmi_device *sdev, u8 protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) scmi_devm_protocol_match, &protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct scmi_handle *scmi_handle_get_from_info(struct scmi_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) info->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * scmi_handle_get() - Get the SCMI handle for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @dev: pointer to device for which we want SCMI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * NOTE: The function does not track individual clients of the framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * and is expected to be maintained by caller of SCMI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * scmi_handle_put must be balanced with successful scmi_handle_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * Return: pointer to handle if successful, NULL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct scmi_handle *scmi_handle_get(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct list_head *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct scmi_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct scmi_handle *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) mutex_lock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) list_for_each(p, &scmi_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) info = list_entry(p, struct scmi_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (dev->parent == info->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) handle = scmi_handle_get_from_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) mutex_unlock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * scmi_handle_put() - Release the handle acquired by scmi_handle_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * @handle: handle acquired by scmi_handle_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * NOTE: The function does not track individual clients of the framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * and is expected to be maintained by caller of SCMI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * scmi_handle_put must be balanced with successful scmi_handle_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * Return: 0 is successfully released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * if null was passed, it returns -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int scmi_handle_put(const struct scmi_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct scmi_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) info = handle_to_scmi_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mutex_lock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!WARN_ON(!info->users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) info->users--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) mutex_unlock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int __scmi_xfer_info_init(struct scmi_info *sinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct scmi_xfers_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct scmi_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct device *dev = sinfo->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) const struct scmi_desc *desc = sinfo->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* Pre-allocated messages, no more than what hdr.seq can support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) "Invalid maximum messages %d, not in range [1 - %lu]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) desc->max_msg, MSG_TOKEN_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) info->xfer_block = devm_kcalloc(dev, desc->max_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) sizeof(*info->xfer_block), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!info->xfer_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sizeof(long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (!info->xfer_alloc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Pre-initialize the buffer pointer to pre-allocated buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!xfer->rx.buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) xfer->tx.buf = xfer->rx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) init_completion(&xfer->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) spin_lock_init(&info->xfer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int scmi_xfer_info_init(struct scmi_info *sinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) int prot_id, bool tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int ret, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct scmi_chan_info *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct idr *idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Transmit channel is first entry i.e. index 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) idx = tx ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) idr = tx ? &info->tx_idr : &info->rx_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* check if already allocated, used for multiple device per protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) cinfo = idr_find(idr, prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!info->desc->ops->chan_available(dev, idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto idr_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) cinfo->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) idr_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (ret != prot_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) cinfo->handle = &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int ret = scmi_chan_setup(info, dev, prot_id, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!ret) /* Rx is optional, hence no error check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) scmi_chan_setup(info, dev, prot_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * scmi_get_protocol_device - Helper to get/create an SCMI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * @np: A device node representing a valid active protocols for the referred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * SCMI instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * @info: The referred SCMI instance for which we are getting/creating this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * @prot_id: The protocol ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * @name: The device name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * Referring to the specific SCMI instance identified by @info, this helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * takes care to return a properly initialized device matching the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * @proto_id and @name: if device was still not existent it is created as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * child of the specified SCMI instance @info and its transport properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * initialized as usual.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static inline struct scmi_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) int prot_id, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct scmi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* Already created for this parent SCMI instance ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) sdev = scmi_find_child_dev(info->dev, prot_id, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) sdev = scmi_device_create(np, info->dev, prot_id, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) dev_err(info->dev, "failed to create %d protocol device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dev_err(&sdev->dev, "failed to setup transport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) scmi_device_destroy(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int prot_id, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct scmi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) sdev = scmi_get_protocol_device(np, info, prot_id, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* setup handle now as the transport is ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) scmi_set_handle(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * scmi_create_protocol_devices - Create devices for all pending requests for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * this SCMI instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @np: The device node describing the protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * @info: The SCMI instance descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @prot_id: The protocol ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * All devices previously requested for this instance (if any) are found and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * created by scanning the proper @&scmi_requested_devices entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void scmi_create_protocol_devices(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct scmi_info *info, int prot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct list_head *phead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) mutex_lock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) phead = idr_find(&scmi_requested_devices, prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (phead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct scmi_requested_dev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) list_for_each_entry(rdev, phead, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) scmi_create_protocol_device(np, info, prot_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) rdev->id_table->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) mutex_unlock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * scmi_request_protocol_device - Helper to request a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * @id_table: A protocol/name pair descriptor for the device to be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * This helper let an SCMI driver request specific devices identified by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * @id_table to be created for each active SCMI instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * The requested device name MUST NOT be already existent for any protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * at first the freshly requested @id_table is annotated in the IDR table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * @scmi_requested_devices, then a matching device is created for each already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * active SCMI instance. (if any)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * This way the requested device is created straight-away for all the already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * initialized(probed) SCMI instances (handles) and it remains also annotated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * as pending creation if the requesting SCMI driver was loaded before some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * SCMI instance and related transports were available: when such late instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * is probed, its probe will take care to scan the list of pending requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * devices and create those on its own (see @scmi_create_protocol_devices and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * its enclosing loop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * Return: 0 on Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) int scmi_request_protocol_device(const struct scmi_device_id *id_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) unsigned int id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct list_head *head, *phead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct scmi_requested_dev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct scmi_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pr_debug("Requesting SCMI device (%s) for protocol %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) id_table->name, id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * Search for the matching protocol rdev list and then search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * of any existent equally named device...fails if any duplicate found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) mutex_lock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) idr_for_each_entry(&scmi_requested_devices, head, id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!phead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* A list found registered in the IDR is never empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rdev = list_first_entry(head, struct scmi_requested_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (rdev->id_table->protocol_id ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) id_table->protocol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) phead = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) list_for_each_entry(rdev, head, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!strcmp(rdev->id_table->name, id_table->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) pr_err("Ignoring duplicate request [%d] %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) rdev->id_table->protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) rdev->id_table->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * No duplicate found for requested id_table, so let's create a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * requested device entry for this new valid request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (!rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) rdev->id_table = id_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * Append the new requested device table descriptor to the head of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * related protocol list, eventually creating such head if not already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!phead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) phead = kzalloc(sizeof(*phead), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!phead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) kfree(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) INIT_LIST_HEAD(phead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ret = idr_alloc(&scmi_requested_devices, (void *)phead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) id_table->protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) id_table->protocol_id + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (ret != id_table->protocol_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) pr_err("Failed to save SCMI device - ret:%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) kfree(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) kfree(phead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) list_add(&rdev->node, phead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * Now effectively create and initialize the requested device for every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * already initialized SCMI instance which has registered the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * protocol as a valid active one: i.e. defined in DT and supported by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * current platform FW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) mutex_lock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) list_for_each_entry(info, &scmi_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct device_node *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) child = idr_find(&info->active_protocols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) struct scmi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) sdev = scmi_get_protocol_device(child, info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) id_table->protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) id_table->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* Set handle if not already set: device existed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (sdev && !sdev->handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) sdev->handle = scmi_handle_get_from_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) dev_err(info->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) "Failed. SCMI protocol %d not active.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) mutex_unlock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mutex_unlock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * scmi_unrequest_protocol_device - Helper to unrequest a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * @id_table: A protocol/name pair descriptor for the device to be unrequested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * An helper to let an SCMI driver release its request about devices; note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * devices are created and initialized once the first SCMI driver request them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * but they destroyed only on SCMI core unloading/unbinding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * The current SCMI transport layer uses such devices as internal references and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * as such they could be shared as same transport between multiple drivers so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * that cannot be safely destroyed till the whole SCMI stack is removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * (unless adding further burden of refcounting.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) void scmi_unrequest_protocol_device(const struct scmi_device_id *id_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct list_head *phead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) id_table->name, id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) mutex_lock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (phead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct scmi_requested_dev *victim, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) list_for_each_entry_safe(victim, tmp, phead, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (!strcmp(victim->id_table->name, id_table->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) list_del(&victim->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) kfree(victim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (list_empty(phead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) idr_remove(&scmi_requested_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) id_table->protocol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) kfree(phead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) mutex_unlock(&scmi_requested_devices_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static int scmi_cleanup_txrx_channels(struct scmi_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct idr *idr = &info->tx_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) idr_destroy(&info->tx_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) idr = &info->rx_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) idr_destroy(&info->rx_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int scmi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct scmi_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) const struct scmi_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct scmi_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct device_node *child, *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) desc = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) info->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) info->desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) INIT_LIST_HEAD(&info->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) idr_init(&info->protocols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) mutex_init(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) idr_init(&info->active_protocols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) platform_set_drvdata(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) idr_init(&info->tx_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) idr_init(&info->rx_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) handle = &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) handle->dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) handle->version = &info->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) handle->devm_acquire_protocol = scmi_devm_acquire_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) handle->devm_get_protocol = scmi_devm_get_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) handle->devm_put_protocol = scmi_devm_put_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ret = scmi_xfer_info_init(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto clear_txrx_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (scmi_notification_init(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) dev_err(dev, "SCMI Notifications NOT available.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * Trigger SCMI Base protocol initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * It's mandatory and won't be ever released/deinit until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * SCMI stack is shutdown/unloaded as a whole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) ret = scmi_acquire_protocol(handle, SCMI_PROTOCOL_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) dev_err(dev, "unable to communicate with SCMI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) goto notification_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) mutex_lock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) list_add_tail(&info->node, &scmi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) mutex_unlock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) for_each_available_child_of_node(np, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) u32 prot_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (of_property_read_u32(child, "reg", &prot_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) dev_err(dev, "Out of range protocol %d\n", prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (!scmi_is_protocol_implemented(handle, prot_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) dev_err(dev, "SCMI protocol %d not implemented\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * Save this valid DT protocol descriptor amongst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * @active_protocols for this SCMI instance/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ret = idr_alloc(&info->active_protocols, child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) prot_id, prot_id + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ret != prot_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_err(dev, "SCMI protocol %d already activated. Skip\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) of_node_get(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) scmi_create_protocol_devices(child, info, prot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) notification_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) scmi_notification_exit(&info->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) clear_txrx_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) scmi_cleanup_txrx_channels(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
/**
 * scmi_free_channel - Drop a channel entry from a channels' IDR
 *
 * @cinfo: The SCMI channel info being released (not freed here)
 * @idr: The IDR (TX or RX) the channel was registered in
 * @id: The identifier the channel was registered under
 *
 * Only detaches the entry from @idr; presumably the transport frees
 * @cinfo itself in its chan_free() callback — confirm against callers.
 */
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static int scmi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int ret = 0, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct scmi_info *info = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct device_node *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) mutex_lock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (info->users)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) list_del(&info->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mutex_unlock(&scmi_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) scmi_notification_exit(&info->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) mutex_lock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) idr_destroy(&info->protocols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) mutex_unlock(&info->protocols_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) idr_for_each_entry(&info->active_protocols, child, id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) of_node_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) idr_destroy(&info->active_protocols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* Safe to free channels since no more users */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return scmi_cleanup_txrx_channels(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static ssize_t protocol_version_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct scmi_info *info = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return sprintf(buf, "%u.%u\n", info->version.major_ver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) info->version.minor_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static DEVICE_ATTR_RO(protocol_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static ssize_t firmware_version_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct scmi_info *info = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return sprintf(buf, "0x%x\n", info->version.impl_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static DEVICE_ATTR_RO(firmware_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static ssize_t vendor_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct scmi_info *info = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return sprintf(buf, "%s\n", info->version.vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static DEVICE_ATTR_RO(vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static ssize_t sub_vendor_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct scmi_info *info = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return sprintf(buf, "%s\n", info->version.sub_vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static DEVICE_ATTR_RO(sub_vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
/* sysfs attributes exposing SCMI version information, grouped as "versions". */
static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

/* Export the match table so module autoloading can work from DT. */
MODULE_DEVICE_TABLE(of, scmi_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
/* Platform driver glue; the version sysfs attributes attach via dev_groups. */
static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
/*
 * Module init: bring up the SCMI bus and register the Base and standard
 * protocols BEFORE registering the platform driver, so protocols are
 * already available when an SCMI platform device probes.
 *
 * Runs at subsys_initcall level — presumably so SCMI is ready early for
 * subsystems that depend on it; confirm against platform requirements.
 */
static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
/*
 * Module exit: unregister the Base and standard protocols and the SCMI
 * bus, then the platform driver itself (mirror of scmi_driver_init).
 */
static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
/* Module metadata. */
MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");