// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};
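
/*
 * Illustration (ours, not from the protocol spec) of how the fields above
 * interact: if bit 3 of @xfer_alloc_table is set, then xfer_block[3] is in
 * flight carrying hdr.seq == 3, so the rx callback can route a response
 * with seq == 3 back to xfer_block[3] in O(1).
 */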

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region:	Memory region where the debug messages are available
 * @debug_region_size:	Debug region size
 * @debug_buffer:	Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};
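
/*
 * A minimal sketch (our illustration; the real lookup lives in the handle
 * getters later in this file, outside this excerpt) of how @node and @users
 * cooperate under ti_sci_list_mutex; matches() is a hypothetical predicate:
 *
 *	mutex_lock(&ti_sci_list_mutex);
 *	list_for_each_entry(info, &ti_sci_list, node)
 *		if (matches(info))
 *			info->users++;
 *	mutex_unlock(&ti_sci_list_mutex);
 */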

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
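
/*
 * Example: inside a mailbox callback the driver only has the embedded
 * &struct mbox_client, so it recovers the enclosing instance with
 *
 *	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
 *
 * (see ti_sci_rx_callback() below). The other two macros apply the same
 * container_of() pattern to the handle and the reboot notifier block.
 */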

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:	sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to NUL-terminate the last byte (hence
	 * we have allocated one extra zero byte). Since we cannot guarantee
	 * any specific data format for debug messages, we just present the
	 * data in the buffer as is - we expect the messages to be
	 * self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Set up NUL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}
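
/*
 * With the default debugfs mount point this surfaces as
 * /sys/kernel/debug/ti_sci_debug@<dev_name> (our example path); reads
 * re-run ti_sci_debug_show() above to snapshot the firmware log region.
 */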

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should do
 * as little work as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy into the rx buffer. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated &struct ti_sci_xfer if all went fine,
 *	   else a corresponding ERR_PTR-encoded error.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we would wait for at most a single message; be
	 * conservative and wait five times that.
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that the number of messages fits in
	 * hdr.seq - NOTE: this gives us predictable O(1) access latency,
	 * BUT it opens us to risk if the remote misbehaves with corrupted
	 * message sequence responses. If that happens, we are going to be
	 * messed up anyway.
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible.
	 * NOTE: we might escape with smp_mb and no lock here,
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all goes well, -ETIMEDOUT in case of no response,
 *	   else the corresponding transmit error.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
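
/*
 * Taken together, the helpers above give every command the same shape.
 * A condensed sketch (it mirrors ti_sci_cmd_get_revision() just below;
 * the message type and response struct are that example's, not new API):
 *
 *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(struct ti_sci_msg_hdr),
 *				   sizeof(struct ti_sci_msg_resp_version));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret)
 *		parse the response in xfer->xfer_buf (the response
 *		overwrote the tx buffer, see struct ti_sci_xfer);
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 */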

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	/* strscpy() guarantees a NUL-terminated destination */
	strscpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					the requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (r_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * @handle: Pointer to TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * @id: Device Identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * @r_state: true if requested to be ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * @curr_state: true if currently ON and active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) bool *r_state, bool *curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) u8 p_state, c_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!r_state && !curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (r_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
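
/*
 * Editor's illustrative sketch (hypothetical device ID): the requested
 * (software) and current (hardware) states reported by the helpers above
 * can disagree while a transition is in flight, so callers usually
 * inspect both.
 */
static int __maybe_unused ti_sci_example_report_dev_state(const struct ti_sci_handle *handle)
{
	bool requested, is_on;
	int ret;

	ret = ti_sci_cmd_dev_is_on(handle, 32, &requested, &is_on);
	if (ret)
		return ret;

	pr_info("device 32: requested-on=%d currently-on=%d\n",
		requested, is_on);
	return 0;
}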
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * @handle: Pointer to TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * @id: Device Identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * @curr_state: true if currently transitioning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bool *curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u8 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * ti_sci_cmd_set_device_resets() - command to set resets for device managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * by TISCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * @id: Device Identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * @reset_state: Device specific reset bit field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u32 id, u32 reset_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct ti_sci_msg_req_set_device_resets *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) req->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) req->resets = reset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
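
/*
 * Editor's illustrative sketch: pulsing a device-specific reset line with
 * the command above. The @reset_state bit layout is device dependent;
 * BIT(0) is only a made-up example.
 */
static int __maybe_unused ti_sci_example_pulse_reset(const struct ti_sci_handle *handle,
						     u32 id)
{
	int ret;

	/* Assert the (hypothetical) reset bit 0 of the device */
	ret = ti_sci_cmd_set_device_resets(handle, id, BIT(0));
	if (ret)
		return ret;

	/* ... device is held in reset here ... */

	/* Release all reset lines */
	return ti_sci_cmd_set_device_resets(handle, id, 0);
}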
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * ti_sci_cmd_get_device_resets() - Get reset state for device managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * by TISCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @handle: Pointer to TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * @id: Device Identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * @reset_state: Pointer to reset state to populate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u32 id, u32 *reset_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * ti_sci_set_clock_state() - Set clock state helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * @flags: Header flags as needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * @state: State to request for the clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) u32 dev_id, u32 clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u32 flags, u8 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct ti_sci_msg_req_set_clock_state *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) req->request_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
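
/*
 * Editor's note: every clock message in this file encodes the clock
 * identifier the same way: IDs 0-254 travel in the legacy 8-bit clk_id
 * field, while the sentinel value 255 directs firmware to the extended
 * 32-bit clk_id_32 field. The driver open-codes that if/else in each
 * request; the helper below is only a hypothetical restatement of the
 * pattern for clarity.
 */
static void __maybe_unused ti_sci_example_encode_clk_id(u8 *clk_id_field,
							u32 *clk_id_32_field,
							u32 clk_id)
{
	if (clk_id < 255) {
		*clk_id_field = clk_id;
	} else {
		*clk_id_field = 255;
		*clk_id_32_field = clk_id;
	}
}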
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * ti_sci_cmd_get_clock_state() - Get clock state helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * @programmed_state: State requested for clock to move to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * @current_state: State that the clock is currently in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) u32 dev_id, u32 clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u8 *programmed_state, u8 *current_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct ti_sci_msg_req_get_clock_state *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct ti_sci_msg_resp_get_clock_state *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (!programmed_state && !current_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (programmed_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) *programmed_state = resp->programmed_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (current_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) *current_state = resp->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
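
/*
 * Editor's illustrative sketch (hypothetical usage): combining the two
 * states returned above to detect a clock that was requested ON but has
 * not yet become ready.
 */
static int __maybe_unused ti_sci_example_clk_settled(const struct ti_sci_handle *handle,
						     u32 dev_id, u32 clk_id,
						     bool *settled)
{
	u8 programmed, current_st;
	int ret;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &programmed, &current_st);
	if (ret)
		return ret;

	/* Requested ON but hardware not yet ready means still settling */
	*settled = !(programmed == MSG_CLOCK_SW_STATE_REQ &&
		     current_st != MSG_CLOCK_HW_STATE_READY);
	return 0;
}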
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * @can_change_freq: 'true' if frequency change is desired, else 'false'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @enable_input_term: 'true' if input termination is desired, else 'false'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u32 clk_id, bool needs_ssc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) bool can_change_freq, bool enable_input_term)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) MSG_CLOCK_SW_STATE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * NOTE: This clock must have been requested by get_clock previously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) u32 dev_id, u32 clk_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return ti_sci_set_clock_state(handle, dev_id, clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) MSG_CLOCK_SW_STATE_UNREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * NOTE: This clock must have been requested by get_clock previously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) u32 dev_id, u32 clk_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return ti_sci_set_clock_state(handle, dev_id, clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) MSG_CLOCK_SW_STATE_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
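
/*
 * Editor's illustrative sketch: the intended lifecycle of the three
 * wrappers above. A consumer claims a clock, may temporarily idle it,
 * and finally returns it to autonomous firmware management. The
 * device/clock IDs (32/2) are hypothetical placeholders.
 */
static int __maybe_unused ti_sci_example_clk_lifecycle(const struct ti_sci_handle *handle)
{
	int ret;

	/* Claim the clock; allow firmware to change its frequency */
	ret = ti_sci_cmd_get_clock(handle, 32, 2, false, true, false);
	if (ret)
		return ret;

	/* ... clock is in use here ... */

	/* Temporarily stop the clock while keeping the claim */
	ret = ti_sci_cmd_idle_clock(handle, 32, 2);
	if (ret)
		return ret;

	/* Hand the clock back to automatic management */
	return ti_sci_cmd_put_clock(handle, 32, 2);
}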
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * ti_sci_cmd_clk_is_auto() - Is the clock being auto-managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * @req_state: state indicating if the clock is auto-managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) u32 dev_id, u32 clk_id, bool *req_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u8 state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (!req_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * ti_sci_cmd_clk_is_on() - Is the clock ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * @req_state: state indicating if the clock is managed by us and enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * @curr_state: state indicating if the clock is ready for operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) u32 clk_id, bool *req_state, bool *curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) u8 c_state = 0, r_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!req_state && !curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) &r_state, &c_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (req_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * ti_sci_cmd_clk_is_off() - Is the clock OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * @req_state: state indicating if the clock is managed by us and disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * @curr_state: state indicating if the clock is NOT ready for operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) u32 clk_id, bool *req_state, bool *curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) u8 c_state = 0, r_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!req_state && !curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) &r_state, &c_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (req_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (curr_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
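
/*
 * Editor's illustrative sketch: a bounded poll built on
 * ti_sci_cmd_clk_is_on() above, waiting for a previously requested clock
 * to become ready. A real caller would also sleep between iterations
 * (e.g. usleep_range(), which needs linux/delay.h).
 */
static int __maybe_unused ti_sci_example_wait_clk_ready(const struct ti_sci_handle *handle,
							u32 dev_id, u32 clk_id)
{
	bool ready = false;
	int ret, tries;

	for (tries = 0; tries < 10; tries++) {
		ret = ti_sci_cmd_clk_is_on(handle, dev_id, clk_id,
					   NULL, &ready);
		if (ret)
			return ret;
		if (ready)
			return 0;
	}

	return -ETIMEDOUT;
}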
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * @parent_id: Parent clock identifier to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u32 dev_id, u32 clk_id, u32 parent_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct ti_sci_msg_req_set_clock_parent *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (parent_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) req->parent_id = parent_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) req->parent_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) req->parent_id_32 = parent_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
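
/*
 * Editor's illustrative sketch (hypothetical IDs): switching clock input
 * 2 of device 32 over to parent 1 with the command above. Parent
 * identifiers use the same 8/32-bit encoding as clock identifiers.
 */
static int __maybe_unused ti_sci_example_reparent(const struct ti_sci_handle *handle)
{
	return ti_sci_cmd_clk_set_parent(handle, 32, 2, 1);
}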
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * ti_sci_cmd_clk_get_parent() - Get current parent clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * @parent_id: Pointer to populate the current parent clock identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) u32 dev_id, u32 clk_id, u32 *parent_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct ti_sci_msg_req_get_clock_parent *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct ti_sci_msg_resp_get_clock_parent *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (!handle || !parent_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (resp->parent_id < 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *parent_id = resp->parent_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) *parent_id = resp->parent_id_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * @num_parents: Returns the number of parents of the current clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) u32 dev_id, u32 clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) u32 *num_parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct ti_sci_msg_req_get_clock_num_parents *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct ti_sci_msg_resp_get_clock_num_parents *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (!handle || !num_parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (resp->num_parents < 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) *num_parents = resp->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) *num_parents = resp->num_parents_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
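
/*
 * Editor's illustrative sketch: combining the two queries above to report
 * where a clock mux currently points and how many inputs it offers.
 */
static int __maybe_unused ti_sci_example_dump_parents(const struct ti_sci_handle *handle,
						      u32 dev_id, u32 clk_id)
{
	u32 num_parents, cur_parent;
	int ret;

	ret = ti_sci_cmd_clk_get_num_parents(handle, dev_id, clk_id,
					     &num_parents);
	if (ret)
		return ret;

	ret = ti_sci_cmd_clk_get_parent(handle, dev_id, clk_id, &cur_parent);
	if (ret)
		return ret;

	pr_info("clk %u of dev %u: parent %u of %u\n",
		clk_id, dev_id, cur_parent, num_parents);
	return 0;
}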
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * which clock input to query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * @min_freq: The minimum allowable frequency in Hz. This is the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * allowable programmed frequency and does not account for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * tolerances and jitter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * @target_freq: The target clock frequency in Hz. A frequency will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * processed as close to this target frequency as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * @max_freq: The maximum allowable frequency in Hz. This is the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * allowable programmed frequency and does not account for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * tolerances and jitter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * @match_freq: Pointer to populate the matched frequency in Hz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) u32 dev_id, u32 clk_id, u64 min_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) u64 target_freq, u64 max_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) u64 *match_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct ti_sci_msg_req_query_clock_freq *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct ti_sci_msg_resp_query_clock_freq *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (!handle || !match_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) req->min_freq_hz = min_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) req->target_freq_hz = target_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) req->max_freq_hz = max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!ti_sci_is_response_ack(resp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) *match_freq = resp->freq_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * @clk_id: Clock identifier for the device for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  * Each device has its own set of clock inputs. This indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * which clock input to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * @min_freq: The minimum allowable frequency in Hz. This is the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * allowable programmed frequency and does not account for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * tolerances and jitter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * @target_freq: The target clock frequency in Hz. A frequency will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * processed as close to this target frequency as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * @max_freq: The maximum allowable frequency in Hz. This is the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * allowable programmed frequency and does not account for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * tolerances and jitter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) u32 dev_id, u32 clk_id, u64 min_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) u64 target_freq, u64 max_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct ti_sci_msg_req_set_clock_freq *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) req->min_freq_hz = min_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) req->target_freq_hz = target_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) req->max_freq_hz = max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
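
/*
 * Illustrative sketch (not part of this driver): a client holding a TI SCI
 * handle reaches the command above through the handle's clk_ops table. The
 * device ID (82) and clock index (0) are hypothetical, and the +/-2% window
 * is just one way of deriving min/max bounds from a target rate:
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u64 target = 192000000;
 *	int ret = cops->set_freq(handle, 82, 0, target - target / 50,
 *				 target, target + target / 50);
 */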
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * ti_sci_cmd_clk_get_freq() - Get current frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * @handle: pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * @dev_id: Device identifier this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @freq:	Current frequency in Hz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) u32 dev_id, u32 clk_id, u64 *freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct ti_sci_msg_req_get_clock_freq *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct ti_sci_msg_resp_get_clock_freq *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (!handle || !freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) req->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (clk_id < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) req->clk_id = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) req->clk_id = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) req->clk_id_32 = clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (!ti_sci_is_response_ack(resp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) *freq = resp->freq_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
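
/*
 * Illustrative sketch: reading the rate back through the same clk_ops table,
 * again with hypothetical device/clock IDs:
 *
 *	u64 hz;
 *	int ret = handle->ops.clk_ops.get_freq(handle, 82, 0, &hz);
 *	if (!ret)
 *		dev_info(dev, "clock runs at %llu Hz\n", hz);
 */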
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
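/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */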
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct ti_sci_msg_req_reboot *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
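
/*
 * A note on usage (sketch): this command is exposed to clients as
 * core_ops.reboot_device, and within this driver it is presumably what the
 * registered restart handler invokes on system reboot:
 *
 *	ret = handle->ops.core_ops.reboot_device(handle);
 */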
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /**
 * ti_sci_get_resource_range() - Helper to get a range of resources assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * to a host. Resource is uniquely identified by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * type and subtype.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * @dev_id: TISCI device ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * @subtype: Resource assignment subtype that is being requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * from the given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * @s_host: Host processor ID to which the resources are allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * @range_start: Start index of the resource range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * @range_num: Number of resources in the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) u32 dev_id, u8 subtype, u8 s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) u16 *range_start, u16 *range_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct ti_sci_msg_resp_get_resource_range *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct ti_sci_msg_req_get_resource_range *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) req->secondary_host = s_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) } else if (!resp->range_start && !resp->range_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) *range_start = resp->range_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) *range_num = resp->range_num;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /**
 * ti_sci_cmd_get_resource_range() - Get a range of resources assigned to the
 *				     host that is the same as the TI SCI
 *				     interface host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * @dev_id: TISCI device ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * @subtype: Resource assignment subtype that is being requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * from the given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * @range_start: Start index of the resource range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * @range_num: Number of resources in the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) u32 dev_id, u8 subtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) u16 *range_start, u16 *range_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return ti_sci_get_resource_range(handle, dev_id, subtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) TI_SCI_IRQ_SECONDARY_HOST_INVALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) range_start, range_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /**
 * ti_sci_cmd_get_resource_range_from_shost() - Get a range of resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * assigned to a specified host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * @dev_id: TISCI device ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * @subtype: Resource assignment subtype that is being requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * from the given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * @s_host: Host processor ID to which the resources are allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * @range_start: Start index of the resource range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * @range_num: Number of resources in the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) u32 dev_id, u8 subtype, u8 s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) u16 *range_start, u16 *range_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) range_start, range_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
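
/*
 * Illustrative sketch: both wrappers above are exposed through rm_core_ops.
 * A client asking for its own share of a resource (dev_id 131 and subtype 0
 * are hypothetical) would do:
 *
 *	u16 start, num;
 *	int ret = handle->ops.rm_core_ops.get_range(handle, 131, 0,
 *						    &start, &num);
 */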
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /**
 * ti_sci_manage_irq() - Helper API to configure/release the irq route between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * the requested source and destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * @valid_params: Bit fields defining the validity of certain params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * @dst_id: Device ID of the IRQ destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * @dst_host_irq: IRQ number of the destination device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * @ia_id: Device ID of the IA, if the IRQ flows through this IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * @vint: Virtual interrupt to be used within the IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * @global_event: Global event number to be used for the requesting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:	Secondary host ID for which the irq/event is being
 *		requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * @type: Request type irq set or release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) u32 valid_params, u16 src_id, u16 src_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) u16 global_event, u8 vint_status_bit, u8 s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) u16 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct ti_sci_msg_req_manage_irq *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) req->valid_params = valid_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) req->src_id = src_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) req->src_index = src_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) req->dst_id = dst_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) req->dst_host_irq = dst_host_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) req->ia_id = ia_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) req->vint = vint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) req->global_event = global_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) req->vint_status_bit = vint_status_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) req->secondary_host = s_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
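
/*
 * A note on valid_params (sketch, mirroring the wrappers that follow): the
 * firmware only consumes the optional fields whose MSG_FLAG_* bit is set,
 * so a plain device-to-host route needs only the destination bits, while an
 * event route instead sets the IA/vint/event bits:
 *
 *	u32 direct_route = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
 *	u32 event_route = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
 *			  MSG_FLAG_GLB_EVNT_VALID | MSG_FLAG_VINT_STS_BIT_VALID;
 */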
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /**
 * ti_sci_set_irq() - Helper API to configure the irq route between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * requested source and destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * @valid_params: Bit fields defining the validity of certain params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * @dst_id: Device ID of the IRQ destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * @dst_host_irq: IRQ number of the destination device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * @ia_id: Device ID of the IA, if the IRQ flows through this IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * @vint: Virtual interrupt to be used within the IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * @global_event: Global event number to be used for the requesting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:	Secondary host ID for which the irq/event is being
 *		requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) u16 src_id, u16 src_index, u16 dst_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) u16 dst_host_irq, u16 ia_id, u16 vint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) u16 global_event, u8 vint_status_bit, u8 s_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) __func__, valid_params, src_id, src_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) dst_id, dst_host_irq, ia_id, vint, global_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) vint_status_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) dst_id, dst_host_irq, ia_id, vint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) global_event, vint_status_bit, s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) TI_SCI_MSG_SET_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /**
 * ti_sci_free_irq() - Helper API to free the irq route between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * requested source and destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * @valid_params: Bit fields defining the validity of certain params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * @dst_id: Device ID of the IRQ destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * @dst_host_irq: IRQ number of the destination device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * @ia_id: Device ID of the IA, if the IRQ flows through this IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * @vint: Virtual interrupt to be used within the IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * @global_event: Global event number to be used for the requesting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:	Secondary host ID for which the irq/event is being
 *		requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) u16 src_id, u16 src_index, u16 dst_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) u16 dst_host_irq, u16 ia_id, u16 vint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) u16 global_event, u8 vint_status_bit, u8 s_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) {
	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) __func__, valid_params, src_id, src_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) dst_id, dst_host_irq, ia_id, vint, global_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) vint_status_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) dst_id, dst_host_irq, ia_id, vint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) global_event, vint_status_bit, s_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) TI_SCI_MSG_FREE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * source and destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * @dst_id: Device ID of the IRQ destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * @dst_host_irq: IRQ number of the destination device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) u16 src_index, u16 dst_id, u16 dst_host_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) dst_host_irq, 0, 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * requested source and Interrupt Aggregator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * @ia_id: Device ID of the IA, if the IRQ flows through this IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * @vint: Virtual interrupt to be used within the IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * @global_event: Global event number to be used for the requesting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * @vint_status_bit: Virtual interrupt status bit to be used for the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) u16 src_id, u16 src_index, u16 ia_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) u16 vint, u16 global_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) u8 vint_status_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) MSG_FLAG_GLB_EVNT_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) MSG_FLAG_VINT_STS_BIT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) ia_id, vint, global_event, vint_status_bit, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /**
 * ti_sci_cmd_free_irq() - Free a host irq route between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * requested source and destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * @dst_id: Device ID of the IRQ destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * @dst_host_irq: IRQ number of the destination device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) u16 src_index, u16 dst_id, u16 dst_host_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) dst_host_irq, 0, 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * ti_sci_cmd_free_event_map() - Free an event map between the requested source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * and Interrupt Aggregator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * @handle: Pointer to TISCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * @src_id: Device ID of the IRQ source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * @src_index: IRQ source index within the source device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * @ia_id: Device ID of the IA, if the IRQ flows through this IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * @vint: Virtual interrupt to be used within the IA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * @global_event: Global event number to be used for the requesting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * @vint_status_bit: Virtual interrupt status bit to be used for the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * Return: 0 if all went fine, else return appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) u16 src_id, u16 src_index, u16 ia_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) u16 vint, u16 global_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) u8 vint_status_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) u32 valid_params = MSG_FLAG_IA_ID_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) MSG_FLAG_VINT_STS_BIT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) ia_id, vint, global_event, vint_status_bit, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
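
/*
 * Illustrative sketch: a typical event-map lifecycle through rm_irq_ops,
 * with hypothetical IDs. The same tuple handed to set_event_map() must be
 * handed back to free_event_map() so the firmware tears down exactly the
 * route it built:
 *
 *	ret = handle->ops.rm_irq_ops.set_event_map(handle, src, idx, ia,
 *						   vint, gevt, sbit);
 *	...
 *	ret = handle->ops.rm_irq_ops.free_event_map(handle, src, idx, ia,
 *						    vint, gevt, sbit);
 */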
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /**
 * ti_sci_cmd_ring_config() - Configure RA ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * @valid_params: Bitfield defining validity of ring configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * @nav_id: Device ID of Navigator Subsystem from which the ring is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * @index: Ring index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * @addr_lo: The ring base address lo 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * @addr_hi: The ring base address hi 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * @count: Number of ring elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) * @mode: The mode of the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * @size: The ring element size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * @order_id: Specifies the ring's bus order ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * See @ti_sci_msg_rm_ring_cfg_req for more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) u32 valid_params, u16 nav_id, u16 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) u32 addr_lo, u32 addr_hi, u32 count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) u8 mode, u8 size, u8 order_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct ti_sci_msg_rm_ring_cfg_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (IS_ERR_OR_NULL(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) req->valid_params = valid_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) req->nav_id = nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) req->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) req->addr_lo = addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) req->addr_hi = addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) req->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) req->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) req->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) req->order_id = order_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
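
/*
 * Illustrative sketch: the ring accelerator driver reaches this command
 * through rm_ring_ops.config; all values below are hypothetical, and only
 * the fields whose bit is set in valid_params take effect:
 *
 *	ret = handle->ops.rm_ring_ops.config(handle, valid_params, nav_id,
 *					     ring_id,
 *					     lower_32_bits(ring_dma),
 *					     upper_32_bits(ring_dma),
 *					     num_elems, mode, elem_size,
 *					     order_id);
 */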
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) /**
 * ti_sci_cmd_ring_get_config() - Get RA ring configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * @nav_id: Device ID of Navigator Subsystem from which the ring is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * @index: Ring index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * @addr_lo: Returns ring's base address lo 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * @addr_hi: Returns ring's base address hi 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * @count: Returns number of ring elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * @mode: Returns mode of the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * @size: Returns ring element size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * @order_id: Returns ring's bus order ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) u32 nav_id, u32 index, u8 *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) u32 *addr_lo, u32 *addr_hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) u32 *count, u8 *size, u8 *order_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct ti_sci_msg_rm_ring_get_cfg_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (IS_ERR_OR_NULL(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) "RM_RA:Message get config failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) req->nav_id = nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) req->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) *mode = resp->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (addr_lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) *addr_lo = resp->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (addr_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) *addr_hi = resp->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) *count = resp->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) *size = resp->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (order_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) *order_id = resp->order_id;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * @nav_id: Device ID of Navigator Subsystem which should be used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * pairing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * @src_thread: Source PSI-L thread ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * @dst_thread: Destination PSI-L thread ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) u32 nav_id, u32 src_thread, u32 dst_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) struct ti_sci_msg_psil_pair *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) req->nav_id = nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) req->src_thread = src_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) req->dst_thread = dst_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * @nav_id: Device ID of Navigator Subsystem which should be used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * unpairing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * @src_thread: Source PSI-L thread ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * @dst_thread: Destination PSI-L thread ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) u32 nav_id, u32 src_thread, u32 dst_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct ti_sci_msg_psil_unpair *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) req->nav_id = nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) req->src_thread = src_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) req->dst_thread = dst_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
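
/*
 * Illustrative sketch: DMA clients pair a PSI-L source thread to a
 * destination thread via rm_psil_ops before starting a transfer and unpair
 * on teardown. Destination thread IDs conventionally carry bit 15 set;
 * nav_id and the thread IDs below are hypothetical:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src_thread,
 *					   dst_thread | 0x8000);
 *	...
 *	ret = handle->ops.rm_psil_ops.unpair(handle, nav_id, src_thread,
 *					     dst_thread | 0x8000);
 */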
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) * more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (IS_ERR_OR_NULL(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) req->valid_params = params->valid_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) req->nav_id = params->nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) req->index = params->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) req->tx_pause_on_err = params->tx_pause_on_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) req->tx_filt_einfo = params->tx_filt_einfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) req->tx_filt_pswords = params->tx_filt_pswords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) req->tx_atype = params->tx_atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) req->tx_chan_type = params->tx_chan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) req->tx_supr_tdpkt = params->tx_supr_tdpkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) req->tx_fetch_size = params->tx_fetch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) req->tx_credit_count = params->tx_credit_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) req->txcq_qnum = params->txcq_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) req->tx_priority = params->tx_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) req->tx_qos = params->tx_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) req->tx_orderid = params->tx_orderid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) req->fdepth = params->fdepth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) req->tx_sched_priority = params->tx_sched_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) req->tx_burst_size = params->tx_burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
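
/*
 * Illustrative sketch (assumption, not taken from this file): a caller
 * zeroes a struct ti_sci_msg_rm_udmap_tx_ch_cfg, fills the fields it
 * cares about and uses valid_params to tell the firmware which optional
 * fields to honor (bit layout per the TISCI resource management spec).
 * All values below are hypothetical.
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
 *	int ret;
 *
 *	cfg.nav_id = nav_id;		// NAVSS/DMA device ID
 *	cfg.index = chan_idx;		// TX channel to configure
 *	cfg.tx_chan_type = chan_type;
 *	cfg.tx_fetch_size = fetch_words;
 *	cfg.txcq_qnum = cq_ring;	// TX completion queue/ring
 *	cfg.valid_params = valid;	// marks which fields are set
 *
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
 */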
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (IS_ERR_OR_NULL(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) req->valid_params = params->valid_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) req->nav_id = params->nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) req->index = params->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) req->rx_fetch_size = params->rx_fetch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) req->rxcq_qnum = params->rxcq_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) req->rx_priority = params->rx_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) req->rx_qos = params->rx_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) req->rx_orderid = params->rx_orderid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) req->rx_sched_priority = params->rx_sched_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) req->flowid_start = params->flowid_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) req->flowid_cnt = params->flowid_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) req->rx_pause_on_err = params->rx_pause_on_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) req->rx_atype = params->rx_atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) req->rx_chan_type = params->rx_chan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) req->rx_ignore_short = params->rx_ignore_short;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) req->rx_ignore_long = params->rx_ignore_long;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) req->rx_burst_size = params->rx_burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * @handle: Pointer to TI SCI handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) const struct ti_sci_msg_rm_udmap_flow_cfg *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (IS_ERR_OR_NULL(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) req->valid_params = params->valid_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) req->nav_id = params->nav_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) req->flow_index = params->flow_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) req->rx_einfo_present = params->rx_einfo_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) req->rx_psinfo_present = params->rx_psinfo_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) req->rx_error_handling = params->rx_error_handling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) req->rx_desc_type = params->rx_desc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) req->rx_sop_offset = params->rx_sop_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) req->rx_dest_qnum = params->rx_dest_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) req->rx_src_tag_hi = params->rx_src_tag_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) req->rx_src_tag_lo = params->rx_src_tag_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) req->rx_dest_tag_hi = params->rx_dest_tag_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) req->rx_dest_tag_lo = params->rx_dest_tag_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) req->rx_fdq1_qnum = params->rx_fdq1_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) req->rx_fdq2_qnum = params->rx_fdq2_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) req->rx_fdq3_qnum = params->rx_fdq3_qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) req->rx_ps_location = params->rx_ps_location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
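
/*
 * Illustrative sketch (hypothetical values): RX flow configuration
 * follows the same pattern as the channel configuration above, keyed
 * by flow_index rather than a channel index.
 *
 *	struct ti_sci_msg_rm_udmap_flow_cfg cfg = { 0 };
 *	int ret;
 *
 *	cfg.nav_id = nav_id;
 *	cfg.flow_index = flow;			// flow to configure
 *	cfg.rx_dest_qnum = dst_ring;		// destination receive ring
 *	cfg.rx_fdq0_sz0_qnum = fdq_ring;	// free descriptor queue ring
 *	cfg.valid_params = valid;
 *
 *	ret = handle->ops.rm_udmap_ops.rx_flow_cfg(handle, &cfg);
 */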
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * ti_sci_cmd_proc_request() - Command to request control of a physical processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * @proc_id: Processor ID this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) u8 proc_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) struct ti_sci_msg_req_proc_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
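
/*
 * Illustrative lifecycle sketch (not from this file): a remoteproc-style
 * driver requests control of a processor before configuring and booting
 * it, then releases (or hands over) control afterwards. proc_id is a
 * hypothetical placeholder.
 *
 *	u8 proc_id = 1;
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.request(handle, proc_id);
 *	if (ret)
 *		return ret;
 *	// ... load firmware, set boot vector, start the core ...
 *	ret = handle->ops.proc_ops.release(handle, proc_id);
 */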
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * ti_sci_cmd_proc_release() - Command to release control of a physical processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * @proc_id: Processor ID this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) u8 proc_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct ti_sci_msg_req_proc_release *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * processor to a host in the processor's access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * control list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) * @proc_id: Processor ID this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) * @host_id: Host ID that will receive control of the processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) u8 proc_id, u8 host_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct ti_sci_msg_req_proc_handover *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) req->host_id = host_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * ti_sci_cmd_proc_set_config() - Command to set the processor boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * configuration flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * @proc_id: Processor ID this request is for
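* @bootvector: Processor boot vector (start address)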
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * @config_flags_set: Configuration flags to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * @config_flags_clear: Configuration flags to be cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) u8 proc_id, u64 bootvector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) u32 config_flags_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) u32 config_flags_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) struct ti_sci_msg_req_set_config *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) TI_SCI_ADDR_HIGH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) req->config_flags_set = config_flags_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) req->config_flags_clear = config_flags_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
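
/*
 * Worked example of the boot vector split above, assuming
 * TI_SCI_ADDR_LOW_MASK selects the lower 32 bits and
 * TI_SCI_ADDR_HIGH_SHIFT is 32 (the actual definitions live in
 * ti_sci.h):
 *
 *	bootvector           = 0x0000000880000000
 *	req->bootvector_low  = 0x80000000
 *	req->bootvector_high = 0x00000008
 */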
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * ti_sci_cmd_proc_set_control() - Command to set the processor boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * control flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) * @proc_id: Processor ID this request is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * @control_flags_set: Control flags to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) * @control_flags_clear: Control flags to be cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) u8 proc_id, u32 control_flags_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) u32 control_flags_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) struct ti_sci_msg_req_set_ctrl *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) struct ti_sci_msg_hdr *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) req->control_flags_set = control_flags_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) req->control_flags_clear = control_flags_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) * @handle: Pointer to TI SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) * @proc_id: Processor ID this request is for
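* @bv: Pointer to store the processor boot vector
* @cfg_flags: Pointer to store the processor configuration flags
* @ctrl_flags: Pointer to store the processor control flags
* @sts_flags: Pointer to store the processor status flags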
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) * Return: 0 if all went well, else returns appropriate error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) u8 proc_id, u64 *bv, u32 *cfg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) u32 *ctrl_flags, u32 *sts_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) struct ti_sci_msg_resp_get_status *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) struct ti_sci_msg_req_get_status *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) sizeof(*req), sizeof(*resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (IS_ERR(xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) ret = PTR_ERR(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) dev_err(dev, "Message alloc failed(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) req->processor_id = proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) ret = ti_sci_do_xfer(info, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) dev_err(dev, "Mbox send fail %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (!ti_sci_is_response_ack(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) TI_SCI_ADDR_HIGH_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) *cfg_flags = resp->config_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) *ctrl_flags = resp->control_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) *sts_flags = resp->status_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) ti_sci_put_one_xfer(&info->minfo, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
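
/*
 * Illustrative caller sketch (proc_id and names hypothetical): the four
 * out-parameters hand back the recombined 64-bit boot vector and the
 * config/control/status flag words.
 *
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.get_status(handle, 1, &bv, &cfg,
 *					      &ctrl, &sts);
 *	if (!ret)
 *		pr_debug("bv:%llx cfg:%x ctrl:%x sts:%x\n",
 *			 bv, cfg, ctrl, sts);
 */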
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) * ti_sci_setup_ops() - Setup the operations structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) * @info: pointer to TISCI instance information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static void ti_sci_setup_ops(struct ti_sci_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) struct ti_sci_ops *ops = &info->handle.ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) struct ti_sci_core_ops *core_ops = &ops->core_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) struct ti_sci_dev_ops *dops = &ops->dev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct ti_sci_clk_ops *cops = &ops->clk_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) struct ti_sci_proc_ops *pops = &ops->proc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) core_ops->reboot_device = ti_sci_cmd_core_reboot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) dops->get_device = ti_sci_cmd_get_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) dops->idle_device = ti_sci_cmd_idle_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) dops->put_device = ti_sci_cmd_put_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) dops->is_valid = ti_sci_cmd_dev_is_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) dops->is_idle = ti_sci_cmd_dev_is_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) dops->is_stop = ti_sci_cmd_dev_is_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) dops->is_on = ti_sci_cmd_dev_is_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) dops->is_transitioning = ti_sci_cmd_dev_is_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) dops->set_device_resets = ti_sci_cmd_set_device_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) dops->get_device_resets = ti_sci_cmd_get_device_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) cops->get_clock = ti_sci_cmd_get_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) cops->idle_clock = ti_sci_cmd_idle_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) cops->put_clock = ti_sci_cmd_put_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) cops->is_auto = ti_sci_cmd_clk_is_auto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) cops->is_on = ti_sci_cmd_clk_is_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) cops->is_off = ti_sci_cmd_clk_is_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) cops->set_parent = ti_sci_cmd_clk_set_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) cops->get_parent = ti_sci_cmd_clk_get_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) cops->set_freq = ti_sci_cmd_clk_set_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) cops->get_freq = ti_sci_cmd_clk_get_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) rm_core_ops->get_range_from_shost =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) ti_sci_cmd_get_resource_range_from_shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) iops->set_irq = ti_sci_cmd_set_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) iops->set_event_map = ti_sci_cmd_set_event_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) iops->free_irq = ti_sci_cmd_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) iops->free_event_map = ti_sci_cmd_free_event_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) rops->config = ti_sci_cmd_ring_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) rops->get_config = ti_sci_cmd_ring_get_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) psilops->pair = ti_sci_cmd_rm_psil_pair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) psilops->unpair = ti_sci_cmd_rm_psil_unpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) pops->request = ti_sci_cmd_proc_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) pops->release = ti_sci_cmd_proc_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) pops->handover = ti_sci_cmd_proc_handover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) pops->set_config = ti_sci_cmd_proc_set_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) pops->set_control = ti_sci_cmd_proc_set_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) pops->get_status = ti_sci_cmd_proc_get_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
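
/*
 * With the ops table populated, client drivers never call the
 * ti_sci_cmd_*() functions directly; everything goes through the
 * handle. A minimal sketch (device ID 42 is hypothetical):
 *
 *	ret = handle->ops.dev_ops.get_device(handle, 42);
 *	// ... use the device ...
 *	ret = handle->ops.dev_ops.put_device(handle, 42);
 */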
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) * ti_sci_get_handle() - Get the TI SCI handle for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) * @dev: Pointer to device for which we want SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * NOTE: The function does not track individual clients of the framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) * and is expected to be maintained by the caller of the TI SCI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * Return: pointer to handle if successful, else:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * -EPROBE_DEFER if the instance is not ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * -ENODEV if the required node handle is missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * -EINVAL if invalid conditions are encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct device_node *ti_sci_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct list_head *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) struct ti_sci_handle *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) pr_err("I need a device pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) ti_sci_np = of_get_parent(dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) if (!ti_sci_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) dev_err(dev, "No OF information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) mutex_lock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) list_for_each(p, &ti_sci_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) info = list_entry(p, struct ti_sci_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) if (ti_sci_np == info->dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) handle = &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) info->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) mutex_unlock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) of_node_put(ti_sci_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) return ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) EXPORT_SYMBOL_GPL(ti_sci_get_handle);
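
/*
 * Sketch of the expected get/put pairing (client code, not from this
 * file): acquire the handle in probe, tolerate -EPROBE_DEFER, and
 * balance with ti_sci_put_handle() in remove.
 *
 *	static int client_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *handle;
 *
 *		handle = ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(handle))
 *			return PTR_ERR(handle);	// may be -EPROBE_DEFER
 *		// ... store the handle, use handle->ops ...
 *		return 0;
 *	}
 *
 *	// and in the .remove callback: ti_sci_put_handle(handle);
 */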
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) * @handle: Handle acquired by ti_sci_get_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * NOTE: The function does not track individual clients of the framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) * and is expected to be maintained by the caller of the TI SCI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) * Return: 0 if successfully released,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) * if an error pointer was passed, it returns the error value back,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) * if null was passed, it returns -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) int ti_sci_put_handle(const struct ti_sci_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) info = handle_to_ti_sci_info(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) mutex_lock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (!WARN_ON(!info->users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) info->users--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) mutex_unlock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) EXPORT_SYMBOL_GPL(ti_sci_put_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static void devm_ti_sci_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) const struct ti_sci_handle **ptr = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) const struct ti_sci_handle *handle = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) ret = ti_sci_put_handle(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) dev_err(dev, "failed to put handle %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) * devm_ti_sci_get_handle() - Managed get handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) * @dev: device for which we want the SCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) * NOTE: This releases the handle once the device resources are no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * needed. The handle MUST NOT be released with ti_sci_put_handle. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * function does not track individual clients of the framework and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) * expected to be maintained by the caller of the TI SCI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) * Return: pointer to handle if successful, else corresponding error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) const struct ti_sci_handle **ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) const struct ti_sci_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) handle = ti_sci_get_handle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (!IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) *ptr = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
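
/*
 * Sketch of the managed variant (client code): devres issues the
 * balancing put when the client device is unbound, so there is no
 * explicit release.
 *
 *	handle = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	// handle stays valid until the client device is unbound
 */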
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) * @np: device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) * @property: property name containing phandle on TISCI node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * NOTE: The function does not track individual clients of the framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) * and is expected to be maintained by the caller of the TI SCI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) * ti_sci_put_handle must be balanced with a successful ti_sci_get_by_phandle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) * Return: pointer to handle if successful, else:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) * -EPROBE_DEFER if the instance is not ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) * -ENODEV if the required node handler is missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) * -EINVAL if invalid conditions are encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) const char *property)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) struct ti_sci_handle *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct device_node *ti_sci_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct list_head *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) if (!np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) pr_err("I need a device node pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) ti_sci_np = of_parse_phandle(np, property, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (!ti_sci_np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) mutex_lock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) list_for_each(p, &ti_sci_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) info = list_entry(p, struct ti_sci_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (ti_sci_np == info->dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) handle = &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) info->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) mutex_unlock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) of_node_put(ti_sci_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) return ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
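/*
* A sketch of the required get/put balance for this unmanaged variant,
* assuming a client node with a hypothetical "ti,sci" phandle property:
*
*	const struct ti_sci_handle *sci;
*
*	sci = ti_sci_get_by_phandle(np, "ti,sci");
*	if (IS_ERR(sci))
*		return PTR_ERR(sci);
*	...use sci->ops...
*	ti_sci_put_handle(sci);
*
* Every successful get must eventually be balanced by a put, otherwise
* the provider's remove() fails with -EBUSY (see ti_sci_remove()).
*/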
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * @dev: Device pointer requesting TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) * @property: property name containing phandle on TISCI node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) * NOTE: This releases the handle once the device resources are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) * no longer needed. It MUST NOT be released with ti_sci_put_handle().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) * The function does not track individual clients; the handle is expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) * to be maintained by the caller of the TI SCI protocol library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) * Return: pointer to the handle if successful, else corresponding error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) const char *property)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) const struct ti_sci_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) const struct ti_sci_handle **ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (!IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) *ptr = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
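/*
* The managed variant is the usual choice from probe; a minimal sketch,
* again assuming a hypothetical "ti,sci" phandle property on the client
* node:
*
*	sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
*	if (IS_ERR(sci))
*		return dev_err_probe(dev, PTR_ERR(sci),
*				     "failed to get TISCI handle\n");
*
* dev_err_probe() keeps the -EPROBE_DEFER case (TISCI not probed yet)
* quiet while still logging real failures.
*/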
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) * @res: Pointer to the TISCI resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) * Return: resource number if all went OK, else TI_SCI_RESOURCE_NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) u16 set, free_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) raw_spin_lock_irqsave(&res->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) for (set = 0; set < res->sets; set++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) free_bit = find_first_zero_bit(res->desc[set].res_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) res->desc[set].num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (free_bit != res->desc[set].num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) set_bit(free_bit, res->desc[set].res_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) raw_spin_unlock_irqrestore(&res->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) return res->desc[set].start + free_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) raw_spin_unlock_irqrestore(&res->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) return TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) * ti_sci_release_resource() - Release a resource from TISCI resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) * @res: Pointer to the TISCI resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) * @id: Resource id to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) u16 set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) raw_spin_lock_irqsave(&res->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) for (set = 0; set < res->sets; set++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (res->desc[set].start <= id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) (res->desc[set].num + res->desc[set].start) > id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) clear_bit(id - res->desc[set].start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) res->desc[set].res_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) raw_spin_unlock_irqrestore(&res->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) EXPORT_SYMBOL_GPL(ti_sci_release_resource);
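/*
* Allocation and release pair up as below; a sketch assuming 'res' was
* obtained from devm_ti_sci_get_of_resource() (defined later in this
* file) and -ENOSPC is the caller's chosen exhaustion error:
*
*	u16 idx = ti_sci_get_free_resource(res);
*
*	if (idx == TI_SCI_RESOURCE_NULL)
*		return -ENOSPC;
*	...
*	ti_sci_release_resource(res, idx);
*
* The value returned is the absolute index (set start + free bit), so it
* can be passed back to ti_sci_release_resource() unchanged.
*/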
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) * @res: Pointer to the TISCI resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) * Return: Total number of resources across all sets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) u32 set, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) for (set = 0; set < res->sets; set++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) count += res->desc[set].num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * @handle: TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) * @dev: Device pointer to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) * @dev_id: TISCI device id to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * @sub_types: Array of sub_types assigned corresponding to device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) * @sets: Number of sub_types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * Return: Pointer to ti_sci_resource if all went well, else appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) static struct ti_sci_resource *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) struct device *dev, u32 dev_id, u32 *sub_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) u32 sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) struct ti_sci_resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) bool valid_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) res->sets = sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (!res->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) for (i = 0; i < res->sets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) sub_types[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) &res->desc[i].start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) &res->desc[i].num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) dev_id, sub_types[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) res->desc[i].start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) res->desc[i].num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) dev_id, sub_types[i], res->desc[i].start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) res->desc[i].num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) valid_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) res->desc[i].res_map =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) sizeof(*res->desc[i].res_map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (!res->desc[i].res_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) raw_spin_lock_init(&res->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) if (valid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) * @handle: TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) * @dev: Device pointer to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) * @dev_id: TISCI device id to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) * @of_prop: property name by which the resources are represented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * Return: Pointer to ti_sci_resource if all went well, else appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) struct ti_sci_resource *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct device *dev, u32 dev_id, char *of_prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) struct ti_sci_resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) u32 *sub_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) int sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (sets < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) dev_err(dev, "%s resource type ids not available\n", of_prop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return ERR_PTR(sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (!sub_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) kfree(sub_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
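/*
* A usage sketch, assuming a hypothetical client whose DT node carries
* the sub-types in a u32 array property (the "ti,sci-rm-range-foo" name
* and FOO_TISCI_DEV_ID below are illustrative only):
*
*	struct ti_sci_resource *res;
*
*	res = devm_ti_sci_get_of_resource(sci, dev, FOO_TISCI_DEV_ID,
*					  "ti,sci-rm-range-foo");
*	if (IS_ERR(res))
*		return PTR_ERR(res);
*
* Each u32 in the property names one sub_type; the start/num range of
* every set is then queried from firmware via rm_core_ops.get_range().
*/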
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) * devm_ti_sci_get_resource() - Get a resource range assigned to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) * @handle: TISCI handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) * @dev: Device pointer to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) * @dev_id: TISCI device id to which the resource is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) * @sub_type: TISCI resource subtype representing the resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) * Return: Pointer to ti_sci_resource if all went well, else appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) * error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) struct ti_sci_resource *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) u32 dev_id, u32 sub_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
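/*
* When the sub-type is fixed in the driver rather than described in DT,
* the single-set wrapper above is enough; a sketch with illustrative ids:
*
*	res = devm_ti_sci_get_resource(sci, dev, FOO_TISCI_DEV_ID,
*				       FOO_TISCI_SUBTYPE);
*/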
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) void *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) const struct ti_sci_handle *handle = &info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) ti_sci_cmd_core_reboot(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) /* Call fail OR pass, we should not still be here: the SoC should have reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) return NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) /* Description for K2G */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) .default_host_id = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) /* Conservative duration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) .max_rx_timeout_ms = 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) .max_msgs = 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) .max_msg_size = 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /* Description for AM654 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) .default_host_id = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) /* Conservative duration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) .max_rx_timeout_ms = 10000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) .max_msgs = 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) .max_msg_size = 60,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) static const struct of_device_id ti_sci_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) { /* Sentinel */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) MODULE_DEVICE_TABLE(of, ti_sci_of_match);
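/*
* For reference, a device tree fragment this driver could bind against
* might look like the sketch below (mailbox phandles and the node name
* are board specific; based on the ti,k2g-sci binding):
*
*	dmsc: system-controller {
*		compatible = "ti,k2g-sci";
*		ti,host-id = <2>;
*		mbox-names = "rx", "tx";
*		mboxes = <&msgmgr &msgmgr_proxy_rx>,
*			 <&msgmgr &msgmgr_proxy_tx>;
*	};
*
* The "rx"/"tx" names match the mbox_request_channel_byname() calls in
* ti_sci_probe(), and "ti,host-id" overrides desc->default_host_id.
*/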
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) static int ti_sci_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) const struct ti_sci_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) struct ti_sci_xfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct ti_sci_info *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) struct ti_sci_xfers_info *minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) struct mbox_client *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) int reboot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) u32 h_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) of_id = of_match_device(ti_sci_of_match, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (!of_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) dev_err(dev, "OF data missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) desc = of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) info->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) info->desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) /* if the property is not present in DT, use a default from desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) info->host_id = info->desc->default_host_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (!h_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) dev_warn(dev, "Host ID 0 is reserved for firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) info->host_id = info->desc->default_host_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) info->host_id = h_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) reboot = of_property_read_bool(dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) "ti,system-reboot-controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) INIT_LIST_HEAD(&info->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) minfo = &info->minfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) * Pre-allocate messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) * NEVER allocate more than what we can indicate in hdr.seq: seq is a u8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) * so at most 256 messages can be in flight. If we have a data description bug, force a fix.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if (WARN_ON(desc->max_msgs >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 1 << (8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) minfo->xfer_block = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) desc->max_msgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) sizeof(*minfo->xfer_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) if (!minfo->xfer_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) minfo->xfer_alloc_table = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) BITS_TO_LONGS(desc->max_msgs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (!minfo->xfer_alloc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) /* Pre-initialize the buffer pointers to the pre-allocated buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (!xfer->xfer_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) xfer->tx_message.buf = xfer->xfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) init_completion(&xfer->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) ret = ti_sci_debugfs_create(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) dev_warn(dev, "Failed to create debug file\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) platform_set_drvdata(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) cl = &info->cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) cl->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) cl->tx_block = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) cl->rx_callback = ti_sci_rx_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) cl->knows_txdone = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) spin_lock_init(&minfo->xfer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) sema_init(&minfo->sem_xfer_count, desc->max_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) info->chan_rx = mbox_request_channel_byname(cl, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (IS_ERR(info->chan_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) ret = PTR_ERR(info->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) info->chan_tx = mbox_request_channel_byname(cl, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) if (IS_ERR(info->chan_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) ret = PTR_ERR(info->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) ret = ti_sci_cmd_get_revision(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) ti_sci_setup_ops(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) if (reboot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) info->nb.notifier_call = tisci_reboot_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) info->nb.priority = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) ret = register_restart_handler(&info->nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) dev_err(dev, "reboot registration fail(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) info->handle.version.abi_major, info->handle.version.abi_minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) info->handle.version.firmware_revision,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) info->handle.version.firmware_description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) mutex_lock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) list_add_tail(&info->node, &ti_sci_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) mutex_unlock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) return of_platform_populate(dev->of_node, NULL, NULL, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (!IS_ERR(info->chan_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) mbox_free_channel(info->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) if (!IS_ERR(info->chan_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) mbox_free_channel(info->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) debugfs_remove(info->d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) static int ti_sci_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) struct ti_sci_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) of_platform_depopulate(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) info = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (info->nb.notifier_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) unregister_restart_handler(&info->nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) mutex_lock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) if (info->users)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) list_del(&info->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) mutex_unlock(&ti_sci_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) ti_sci_debugfs_destroy(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) /* Safe to free channels since no more users */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) mbox_free_channel(info->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) mbox_free_channel(info->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) static struct platform_driver ti_sci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) .probe = ti_sci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) .remove = ti_sci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) .name = "ti-sci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) .of_match_table = of_match_ptr(ti_sci_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) module_platform_driver(ti_sci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) MODULE_AUTHOR("Nishanth Menon");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) MODULE_ALIAS("platform:ti-sci");