Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) // SPI init/core code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) // Copyright (C) 2005 David Brownell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) // Copyright (C) 2008 Secret Lab Technologies Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/clk/clk-conf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/mod_devicetable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/spi/spi-mem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/of_gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/pm_domain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/property.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/sched/rt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <uapi/linux/sched/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/platform_data/x86/apple.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <trace/events/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include "internals.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) static DEFINE_IDR(spi_master_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) static void spidev_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	struct spi_device	*spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	spi_controller_put(spi->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	kfree(spi->driver_override);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	kfree(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) modalias_show(struct device *dev, struct device_attribute *a, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	const struct spi_device	*spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	if (len != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 		return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static DEVICE_ATTR_RO(modalias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static ssize_t driver_override_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 				     struct device_attribute *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 				     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	struct spi_device *spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	const char *end = memchr(buf, '\n', count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	const size_t len = end ? end - buf : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	const char *driver_override, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	/* We need to keep extra room for a newline when displaying value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	if (len >= (PAGE_SIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	driver_override = kstrndup(buf, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	if (!driver_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	old = spi->driver_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		spi->driver_override = driver_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		/* Empty string, disable driver override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 		spi->driver_override = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		kfree(driver_override);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	kfree(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) static ssize_t driver_override_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 				    struct device_attribute *a, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	const struct spi_device *spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) static DEVICE_ATTR_RW(driver_override);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
/*
 * SPI_STATISTICS_ATTRS(field, file) - sysfs plumbing for one statistic.
 *
 * For an already-defined spi_statistics_##field##_show() helper this
 * generates two show() wrappers (one resolving the spi_statistics block
 * from the controller, one from the spi device) and two read-only (0444)
 * struct device_attribute instances, dev_attr_spi_controller_##field and
 * dev_attr_spi_device_##field, both published under the name @file.
 */
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
/*
 * SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) - define
 * spi_statistics_##name##_show(), which prints @field of a struct
 * spi_statistics using @format_string while holding the statistics
 * spinlock (so a consistent value is read), then instantiates the pair
 * of sysfs attributes for it via SPI_STATISTICS_ATTRS().
 */
#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

/* Shorthand for the common case: sysfs file name == struct member name */
#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
/* Per-message / per-transfer event counters */
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

/* How messages were submitted: sync, sync handled inline, or async */
SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

/* Byte counters: total, and split by direction */
SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

/*
 * Transfer-size histogram: bucket @index covers the byte range named by
 * @number (power-of-two ranges); bucket selection happens in
 * spi_statistics_add_transfer_stats().
 */
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

/* Transfers that had to be split to honour the controller's max size */
SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 
/* Default sysfs attributes present on every SPI device */
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/* Unnamed group: attributes appear directly in the device directory */
static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
/* Statistics attributes generated above, collected for SPI devices */
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

/* Exposed under <spi-device>/statistics/ in sysfs */
static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

/* All attribute groups installed on SPI devices (via spi_bus_type) */
static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
/* Same statistics set, attached to the controller device */
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

/* Exposed under <controller>/statistics/ in sysfs */
static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

/* Attribute groups installed on controller ("master") devices */
static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
/**
 * spi_statistics_add_transfer_stats - account one transfer in a stats block
 * @stats: statistics block to update (a device's or a controller's)
 * @xfer:  the transfer being accounted
 * @ctlr:  controller the transfer ran on; used to recognise its dummy
 *         rx/tx bounce buffers so padding traffic isn't counted as data
 *
 * Updates the transfer count, the size histogram and the byte counters,
 * all under @stats->lock so readers see consistent values.
 */
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	/* Histogram bucket = floor(log2(len)), clamped to the last slot */
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	/* fls(0) == 0 would yield -1: zero-length transfers go in bucket 0 */
	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	/* Only count a direction when a real (non-dummy) buffer was used */
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316)  * and the sysfs version makes coldplug work too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 						const struct spi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	while (id->name[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		if (!strcmp(sdev->modalias, id->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 			return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	return spi_match_id(sdrv->id_table, sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) EXPORT_SYMBOL_GPL(spi_get_device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) static int spi_match_device(struct device *dev, struct device_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	const struct spi_device	*spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	const struct spi_driver	*sdrv = to_spi_driver(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	/* Check override first, and if set, only use the named driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	if (spi->driver_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		return strcmp(spi->driver_override, drv->name) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	/* Attempt an OF style match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	if (of_driver_match_device(dev, drv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	/* Then try ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	if (acpi_driver_match_device(dev, drv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	if (sdrv->id_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		return !!spi_match_id(sdrv->id_table, spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	return strcmp(spi->modalias, drv->name) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	const struct spi_device		*spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	rc = acpi_device_uevent_modalias(dev, env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	if (rc != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
/* The SPI bus type; SPI devices and drivers register against this bus. */
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,	/* modalias/driver_override + statistics */
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) static int spi_drv_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	struct spi_device		*spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	ret = of_clk_set_defaults(dev->of_node, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		spi->irq = of_irq_get(dev->of_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		if (spi->irq == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 			return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		if (spi->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 			spi->irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	ret = dev_pm_domain_attach(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	if (sdrv->probe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		ret = sdrv->probe(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 			dev_pm_domain_detach(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) static int spi_drv_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	if (sdrv->remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		ret = sdrv->remove(to_spi_device(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	dev_pm_domain_detach(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) static void spi_drv_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	sdrv->shutdown(to_spi_device(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433)  * __spi_register_driver - register a SPI driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434)  * @owner: owner module of the driver to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435)  * @sdrv: the driver to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	sdrv->driver.owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	sdrv->driver.bus = &spi_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	sdrv->driver.probe = spi_drv_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	sdrv->driver.remove = spi_drv_remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	if (sdrv->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		sdrv->driver.shutdown = spi_drv_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	return driver_register(&sdrv->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) EXPORT_SYMBOL_GPL(__spi_register_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) /* SPI devices should normally not be created by SPI device drivers; that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455)  * would make them board-specific.  Similarly with SPI controller drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456)  * Device registration normally goes into like arch/.../mach.../board-YYY.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457)  * with other readonly (flashable) information about mainboard devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) struct boardinfo {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	struct spi_board_info	board_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) static LIST_HEAD(board_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) static LIST_HEAD(spi_controller_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469)  * Used to protect add/del operation for board_info list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470)  * spi_controller list, and their matching process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471)  * also used to protect object of type struct idr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) static DEFINE_MUTEX(board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476)  * Prevents addition of devices with same chip select and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477)  * addition of devices below an unregistering controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) static DEFINE_MUTEX(spi_add_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482)  * spi_alloc_device - Allocate a new SPI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483)  * @ctlr: Controller to which device is connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486)  * Allows a driver to allocate and initialize a spi_device without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487)  * registering it immediately.  This allows a driver to directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488)  * fill the spi_device with device parameters before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489)  * spi_add_device() on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)  * Caller is responsible to call spi_add_device() on the returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)  * spi_device structure to add it to the SPI controller.  If the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493)  * needs to discard the spi_device without adding it, then it should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * call spi_dev_put() on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * Return: a pointer to the new device, or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	struct spi_device	*spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	if (!spi_controller_get(ctlr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (!spi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		spi_controller_put(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	spi->master = spi->controller = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	spi->dev.parent = &ctlr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	spi->dev.bus = &spi_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	spi->dev.release = spidev_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	spi->cs_gpio = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	spi->mode = ctlr->buswidth_override_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	spin_lock_init(&spi->statistics.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	device_initialize(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	return spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) EXPORT_SYMBOL_GPL(spi_alloc_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) static void spi_dev_set_name(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	if (adev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		     spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) static int spi_dev_check(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	struct spi_device *spi = to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	struct spi_device *new_spi = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	if (spi->controller == new_spi->controller &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	    spi->chip_select == new_spi->chip_select)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) static void spi_cleanup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	if (spi->controller->cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		spi->controller->cleanup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556)  * spi_add_device - Add spi_device allocated with spi_alloc_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557)  * @spi: spi_device to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  * Companion function to spi_alloc_device.  Devices allocated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  * spi_alloc_device can be added onto the spi bus with this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  * Return: 0 on success; negative errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) int spi_add_device(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	struct device *dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	/* Chipselects are numbered 0..max; validate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (spi->chip_select >= ctlr->num_chipselect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			ctlr->num_chipselect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	/* Set the bus ID string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	spi_dev_set_name(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	/* We need to make sure there's no other device with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	 * chipselect **BEFORE** we call setup(), else we'll trash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	 * its configuration.  Lock against concurrent add() calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	mutex_lock(&spi_add_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		dev_err(dev, "chipselect %d already in use\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 				spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	/* Controller may unregister concurrently */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	    !device_is_registered(&ctlr->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		status = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	/* Descriptors take precedence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	if (ctlr->cs_gpiods)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	else if (ctlr->cs_gpios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	/* Drivers may modify this initial i/o setup, but will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * normally rely on the device being setup.  Devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 * using SPI_CS_HIGH can't coexist well otherwise...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	status = spi_setup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		dev_err(dev, "can't setup %s, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 				dev_name(&spi->dev), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/* Device may be bound to an active driver when this returns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	status = device_add(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		dev_err(dev, "can't add %s, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 				dev_name(&spi->dev), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		spi_cleanup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	mutex_unlock(&spi_add_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) EXPORT_SYMBOL_GPL(spi_add_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634)  * spi_new_device - instantiate one new SPI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635)  * @ctlr: Controller to which device is connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636)  * @chip: Describes the SPI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * On typical mainboards, this is purely internal; and it's not needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * after board init creates the hard-wired devices.  Some development
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * platforms may not be able to use spi_register_board_info though, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * this is exported so that for example a USB or parport based adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * driver could add devices (which it would learn about out-of-band).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  * Return: the new device, or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) struct spi_device *spi_new_device(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 				  struct spi_board_info *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	struct spi_device	*proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	/* NOTE:  caller did any chip->bus_num checks necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	 * Also, unless we change the return value convention to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	 * suggests syslogged diagnostics are best here (ugh).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	proxy = spi_alloc_device(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (!proxy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	proxy->chip_select = chip->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	proxy->max_speed_hz = chip->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	proxy->mode = chip->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	proxy->irq = chip->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	proxy->dev.platform_data = (void *) chip->platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	proxy->controller_data = chip->controller_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	proxy->controller_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (chip->properties) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		status = device_add_properties(&proxy->dev, chip->properties);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			dev_err(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 				"failed to add properties to '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 				chip->modalias, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			goto err_dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	status = spi_add_device(proxy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		goto err_remove_props;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	return proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) err_remove_props:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	if (chip->properties)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		device_remove_properties(&proxy->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) err_dev_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	spi_dev_put(proxy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) EXPORT_SYMBOL_GPL(spi_new_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701)  * spi_unregister_device - unregister a single SPI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * @spi: spi_device to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  * Start making the passed SPI device vanish. Normally this would be handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  * by spi_unregister_controller().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) void spi_unregister_device(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (!spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (spi->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		of_node_put(spi->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	if (ACPI_COMPANION(&spi->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	device_del(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	spi_cleanup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	put_device(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) EXPORT_SYMBOL_GPL(spi_unregister_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 					      struct spi_board_info *bi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct spi_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (ctlr->bus_num != bi->bus_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	dev = spi_new_device(ctlr, bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			bi->modalias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  * spi_register_board_info - register SPI devices for a given board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  * @info: array of chip descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * @n: how many descriptors are provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * Board-specific early init code calls this (probably during arch_initcall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * with segments of the SPI device table.  Any device nodes are created later,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * after the relevant parent SPI controller (bus_num) is defined.  We keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  * this table of devices forever, so that reloading a controller driver will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  * not make Linux forget about these hard-wired devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * Other code can also call this, e.g. a particular add-on board might provide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * SPI devices through its expansion connector, so code initializing that board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * would naturally declare its SPI devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * The board info passed can safely be __initdata ... but be careful of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * any embedded pointers (platform_data, etc), they're copied as-is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * Device properties are deep-copied though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) int spi_register_board_info(struct spi_board_info const *info, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct boardinfo *bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (!bi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	for (i = 0; i < n; i++, bi++, info++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		memcpy(&bi->board_info, info, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		if (info->properties) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			bi->board_info.properties =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 					property_entries_dup(info->properties);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			if (IS_ERR(bi->board_info.properties))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 				return PTR_ERR(bi->board_info.properties);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		list_add_tail(&bi->list, &board_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		list_for_each_entry(ctlr, &spi_controller_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			spi_match_controller_to_boardinfo(ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 							  &bi->board_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	bool enable1 = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * Avoid calling into the driver (or doing delays) if the chip select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * isn't actually changing from the last time this was called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (!force && (spi->controller->last_cs_enable == enable) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	spi->controller->last_cs_enable = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (!spi->controller->set_cs_timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		if (enable1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			spi_delay_exec(&spi->controller->cs_setup, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			spi_delay_exec(&spi->controller->cs_hold, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (spi->mode & SPI_CS_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		enable = !enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		if (!(spi->mode & SPI_NO_CS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			if (spi->cs_gpiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				 * Historically ACPI has no means of the GPIO polarity and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 				 * thus the SPISerialBus() resource defines it on the per-chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 				 * basis. In order to avoid a chain of negations, the GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 				 * polarity is considered being Active High. Even for the cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				 * when _DSD() is involved (in the updated versions of ACPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				 * the GPIO CS polarity must be defined Active High to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 				 * into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				if (has_acpi_companion(&spi->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 					/* Polarity handled by GPIO library */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 					gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				 * invert the enable line, as active low is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				 * default for SPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				gpio_set_value_cansleep(spi->cs_gpio, !enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		/* Some SPI masters need both GPIO CS & slave_select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		    spi->controller->set_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			spi->controller->set_cs(spi, !enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	} else if (spi->controller->set_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		spi->controller->set_cs(spi, !enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (!spi->controller->set_cs_timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		if (!enable1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			spi_delay_exec(&spi->controller->cs_inactive, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) #ifdef CONFIG_HAS_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		struct sg_table *sgt, void *buf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	const bool vmalloced_buf = is_vmalloc_addr(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				(unsigned long)buf < (PKMAP_BASE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 					(LAST_PKMAP * PAGE_SIZE)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	const bool kmap_buf = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	int desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	int sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct page *vm_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	void *sg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	size_t min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (vmalloced_buf || kmap_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	} else if (virt_addr_valid(buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		sgs = DIV_ROUND_UP(len, desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	sg = &sgt->sgl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	for (i = 0; i < sgs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		if (vmalloced_buf || kmap_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			 * Next scatterlist entry size is the minimum between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			 * the desc_len and the remaining buffer length that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			 * fits in a page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			min = min_t(size_t, desc_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 				    min_t(size_t, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 					  PAGE_SIZE - offset_in_page(buf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			if (vmalloced_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				vm_page = vmalloc_to_page(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				vm_page = kmap_to_page(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			if (!vm_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 				sg_free_table(sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			sg_set_page(sg, vm_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				    min, offset_in_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			min = min_t(size_t, len, desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			sg_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			sg_set_buf(sg, sg_buf, min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		buf += min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		len -= min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		sg_free_table(sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	sgt->nents = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		   struct sg_table *sgt, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	if (sgt->orig_nents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		sg_free_table(sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct device *tx_dev, *rx_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (!ctlr->can_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		tx_dev = ctlr->dma_tx->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		tx_dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (ctlr->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		rx_dev = ctlr->dma_rx->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		rx_dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		if (xfer->tx_buf != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 					  (void *)xfer->tx_buf, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 					  DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (xfer->rx_buf != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 					  xfer->rx_buf, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 					  DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 					      DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	ctlr->cur_msg_mapped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct device *tx_dev, *rx_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		tx_dev = ctlr->dma_tx->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		tx_dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	if (ctlr->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		rx_dev = ctlr->dma_rx->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		rx_dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	ctlr->cur_msg_mapped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) #else /* !CONFIG_HAS_DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline int __spi_map_msg(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static inline int __spi_unmap_msg(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 				  struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) #endif /* !CONFIG_HAS_DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static inline int spi_unmap_msg(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 				struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		 * Restore the original value of tx_buf or rx_buf if they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		 * NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		if (xfer->tx_buf == ctlr->dummy_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			xfer->tx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		if (xfer->rx_buf == ctlr->dummy_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			xfer->rx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	return __spi_unmap_msg(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	unsigned int max_tx, max_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		&& !(msg->spi->mode & SPI_3WIRE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		max_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		max_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			    !xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 				max_tx = max(xfer->len, max_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			    !xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 				max_rx = max(xfer->len, max_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (max_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			tmp = krealloc(ctlr->dummy_tx, max_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				       GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			ctlr->dummy_tx = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			memset(tmp, 0, max_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		if (max_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			tmp = krealloc(ctlr->dummy_rx, max_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				       GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			ctlr->dummy_rx = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		if (max_tx || max_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			list_for_each_entry(xfer, &msg->transfers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 					    transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				if (!xfer->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				if (!xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 					xfer->tx_buf = ctlr->dummy_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 				if (!xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 					xfer->rx_buf = ctlr->dummy_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return __spi_map_msg(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static int spi_transfer_wait(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			     struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			     struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	struct spi_statistics *statm = &ctlr->statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	struct spi_statistics *stats = &msg->spi->statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	u32 speed_hz = xfer->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	unsigned long long ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (spi_controller_is_slave(ctlr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		if (!speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			speed_hz = 100000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		ms = 8LL * 1000LL * xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		do_div(ms, speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		ms += ms + 200; /* some tolerance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (ms > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			ms = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 						 msecs_to_jiffies(ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if (ms == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			dev_err(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				"SPI transfer timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void _spi_transfer_delay_ns(u32 ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (!ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (ns <= 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		ndelay(ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		u32 us = DIV_ROUND_UP(ns, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		if (us <= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			udelay(us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			usleep_range(us, us + DIV_ROUND_UP(us, 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	u32 delay = _delay->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	u32 unit = _delay->unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	u32 hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	if (!delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	switch (unit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	case SPI_DELAY_UNIT_USECS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		delay *= 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	case SPI_DELAY_UNIT_SCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		/* clock cycles need to be obtained from spi_transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		if (!xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		/* if there is no effective speed know, then approximate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		 * by underestimating with half the requested hz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (!hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		delay *= DIV_ROUND_UP(1000000000, hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) EXPORT_SYMBOL_GPL(spi_delay_to_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	int delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (!_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	delay = spi_delay_to_ns(_delay, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (delay < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		return delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	_spi_transfer_delay_ns(delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) EXPORT_SYMBOL_GPL(spi_delay_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void _spi_transfer_cs_change_delay(struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 					  struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	u32 delay = xfer->cs_change_delay.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	u32 unit = xfer->cs_change_delay.unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* return early on "fast" mode - for everything but USECS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		if (unit == SPI_DELAY_UNIT_USECS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			_spi_transfer_delay_ns(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		dev_err_once(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			     "Use of unsupported delay unit %i, using default of 10us\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			     unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		_spi_transfer_delay_ns(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * spi_transfer_one_message - Default implementation of transfer_one_message()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * This is a standard implementation of transfer_one_message() for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * drivers which implement a transfer_one() operation.  It provides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * standard handling of delays and chip select management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int spi_transfer_one_message(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				    struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	bool keep_cs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct spi_statistics *statm = &ctlr->statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct spi_statistics *stats = &msg->spi->statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	spi_set_cs(msg->spi, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		trace_spi_transfer_start(msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		if (!ctlr->ptp_sts_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			xfer->ptp_sts_word_pre = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			ptp_read_system_prets(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			reinit_completion(&ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) fallback_pio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				if (ctlr->cur_msg_mapped &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 					__spi_unmap_msg(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 					ctlr->fallback = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 					goto fallback_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				SPI_STATISTICS_INCREMENT_FIELD(statm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 							       errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				SPI_STATISTICS_INCREMENT_FIELD(stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 							       errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 				dev_err(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 					"SPI transfer failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 				ret = spi_transfer_wait(ctlr, msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 					msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			if (xfer->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				dev_err(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 					"Bufferless transfer has length %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 					xfer->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		if (!ctlr->ptp_sts_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			ptp_read_system_postts(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			xfer->ptp_sts_word_post = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		trace_spi_transfer_stop(msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		if (msg->status != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		spi_transfer_delay_exec(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		if (xfer->cs_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			if (list_is_last(&xfer->transfer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 					 &msg->transfers)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 				keep_cs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				spi_set_cs(msg->spi, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 				_spi_transfer_cs_change_delay(msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 				spi_set_cs(msg->spi, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		msg->actual_length += xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (ret != 0 || !keep_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		spi_set_cs(msg->spi, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (msg->status == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (msg->status && ctlr->handle_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		ctlr->handle_err(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	spi_finalize_current_message(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)  * spi_finalize_current_transfer - report completion of a transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  * @ctlr: the controller reporting completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)  * Called by SPI drivers using the core transfer_one_message()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)  * implementation to notify it that the current interrupt driven
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)  * transfer has finished and the next one may be scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) void spi_finalize_current_transfer(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	complete(&ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static void spi_idle_runtime_pm(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (ctlr->auto_runtime_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		pm_runtime_mark_last_busy(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		pm_runtime_put_autosuspend(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * __spi_pump_messages - function which processes spi message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  * @ctlr: controller to process queue for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * @in_kthread: true if we are in the context of the message pump thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * This function checks if there is any spi message in the queue that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * needs processing and if so call out to the driver to initialize hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  * and transfer each message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  * Note that it is called both from the kthread itself and also from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  * inside spi_sync(); the queue extraction handling at the top of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  * function should deal with this safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	struct spi_message *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	bool was_busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	/* Lock queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	/* Make sure we are not already running a message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (ctlr->cur_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	/* If another context is idling the device then defer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (ctlr->idling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	/* Check if the queue is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (list_empty(&ctlr->queue) || !ctlr->running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (!ctlr->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		/* Defer any non-atomic teardown to the thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		if (!in_kthread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			    !ctlr->unprepare_transfer_hardware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 				spi_idle_runtime_pm(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 				ctlr->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 				trace_spi_controller_idle(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 				kthread_queue_work(ctlr->kworker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 						   &ctlr->pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		ctlr->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		ctlr->idling = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		kfree(ctlr->dummy_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		ctlr->dummy_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		kfree(ctlr->dummy_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		ctlr->dummy_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		if (ctlr->unprepare_transfer_hardware &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		    ctlr->unprepare_transfer_hardware(ctlr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			dev_err(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 				"failed to unprepare transfer hardware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		spi_idle_runtime_pm(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		trace_spi_controller_idle(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		ctlr->idling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	/* Extract head of queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	ctlr->cur_msg = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	list_del_init(&msg->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (ctlr->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		was_busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		ctlr->busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	mutex_lock(&ctlr->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (!was_busy && ctlr->auto_runtime_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		ret = pm_runtime_get_sync(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			pm_runtime_put_noidle(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			mutex_unlock(&ctlr->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (!was_busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		trace_spi_controller_busy(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (!was_busy && ctlr->prepare_transfer_hardware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		ret = ctlr->prepare_transfer_hardware(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			dev_err(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 				"failed to prepare transfer hardware: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			if (ctlr->auto_runtime_pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 				pm_runtime_put(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			spi_finalize_current_message(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			mutex_unlock(&ctlr->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	trace_spi_message_start(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (ctlr->prepare_message) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		ret = ctlr->prepare_message(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			spi_finalize_current_message(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		ctlr->cur_msg_prepared = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	ret = spi_map_msg(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		spi_finalize_current_message(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			xfer->ptp_sts_word_pre = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			ptp_read_system_prets(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	ret = ctlr->transfer_one_message(ctlr, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		dev_err(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			"failed to transfer one message from queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	mutex_unlock(&ctlr->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	/* Prod the scheduler in case transfer_one() was busy waiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * spi_pump_messages - kthread work function which processes spi message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * @work: pointer to kthread work struct contained in the controller struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static void spi_pump_messages(struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct spi_controller *ctlr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		container_of(work, struct spi_controller, pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	__spi_pump_messages(ctlr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  *			    TX timestamp for the requested byte from the SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *			    transfer. The frequency with which this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  *			    must be called (once per word, once for the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  *			    transfer, once per batch of words etc) is arbitrary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  *			    as long as the @tx buffer offset is greater than or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  *			    equal to the requested byte at the time of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  *			    call. The timestamp is only taken once, at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  *			    first such call. It is assumed that the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  *			    advances its @tx buffer pointer monotonically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * @ctlr: Pointer to the spi_controller structure of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * @xfer: Pointer to the transfer being timestamped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  * @progress: How many words (not bytes) have been transferred so far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  *	      transfer, for less jitter in time measurement. Only compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  *	      with PIO drivers. If true, must follow up with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  *	      spi_take_timestamp_post or otherwise system will crash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  *	      WARNING: for fully predictable results, the CPU frequency must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  *	      also be under control (governor).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) void spi_take_timestamp_pre(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			    struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			    size_t progress, bool irqs_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (!xfer->ptp_sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (xfer->timestamped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (progress > xfer->ptp_sts_word_pre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	/* Capture the resolution of the timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	xfer->ptp_sts_word_pre = progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if (irqs_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		local_irq_save(ctlr->irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	ptp_read_system_prets(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  * spi_take_timestamp_post - helper for drivers to collect the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  *			     TX timestamp for the requested byte from the SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  *			     transfer. Can be called with an arbitrary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  *			     frequency: only the first call where @tx exceeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  *			     or is equal to the requested word will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  *			     timestamped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  * @ctlr: Pointer to the spi_controller structure of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  * @xfer: Pointer to the transfer being timestamped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  * @progress: How many words (not bytes) have been transferred so far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) void spi_take_timestamp_post(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			     struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			     size_t progress, bool irqs_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	if (!xfer->ptp_sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (xfer->timestamped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (progress < xfer->ptp_sts_word_post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	ptp_read_system_postts(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (irqs_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		local_irq_restore(ctlr->irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	/* Capture the resolution of the timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	xfer->ptp_sts_word_post = progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	xfer->timestamped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)  * spi_set_thread_rt - set the controller to pump at realtime priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)  * @ctlr: controller to boost priority of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * This can be called because the controller requested realtime priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  * (by setting the ->rt value before calling spi_register_controller()) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  * because a device on the bus said that its transfers needed realtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  * NOTE: at the moment if any device on a bus says it needs realtime then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  * the thread will be at realtime priority for all transfers on that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  * controller.  If this eventually becomes a problem we may see if we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * find a way to boost the priority only temporarily during relevant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  * transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static void spi_set_thread_rt(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	dev_info(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		"will run message pump with realtime priority\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	sched_set_fifo(ctlr->kworker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int spi_init_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	ctlr->running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	ctlr->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (IS_ERR(ctlr->kworker)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		return PTR_ERR(ctlr->kworker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	 * Controller config will indicate if this controller should run the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	 * message pump with high (realtime) priority to reduce the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	 * latency on the bus by minimising the delay between a transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	 * request and the scheduling of the message pump thread. Without this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	 * setting the message pump thread will remain at default priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	if (ctlr->rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		spi_set_thread_rt(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)  * spi_get_next_queued_message() - called by driver to check for queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)  * messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * @ctlr: the controller to check for queued messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  * If there are more messages in the queue, the next message is returned from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  * this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  * Return: the next message in the queue, else NULL if the queue is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	struct spi_message *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	/* get a pointer to the next message, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 					queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * spi_finalize_current_message() - the current message is complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * @ctlr: the controller to return the message to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * Called by the driver to notify the core that the message in the front of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * queue is complete and can be removed from the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) void spi_finalize_current_message(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	struct spi_message *mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	mesg = ctlr->cur_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			ptp_read_system_postts(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			xfer->ptp_sts_word_post = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (unlikely(ctlr->ptp_sts_supported))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	spi_unmap_msg(ctlr, mesg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	/* In the prepare_messages callback the spi bus has the opportunity to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	 * split a transfer to smaller chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	 * Release splited transfers here since spi_map_msg is done on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	 * splited transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	spi_res_release(ctlr, mesg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		ret = ctlr->unprepare_message(ctlr, mesg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	ctlr->cur_msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	ctlr->cur_msg_prepared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	ctlr->fallback = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	trace_spi_message_done(mesg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	mesg->state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	if (mesg->complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		mesg->complete(mesg->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) EXPORT_SYMBOL_GPL(spi_finalize_current_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static int spi_start_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (ctlr->running || ctlr->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	ctlr->running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	ctlr->cur_msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static int spi_stop_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	unsigned limit = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 * This is a bit lame, but is optimized for the common execution path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	 * A wait_queue on the ctlr->busy could be used, but then the common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	 * execution path (pump_messages) would be required to call wake_up or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	 * friends on every SPI message. Do this instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (!list_empty(&ctlr->queue) || ctlr->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		ctlr->running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		dev_warn(&ctlr->dev, "could not stop message queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int spi_destroy_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	ret = spi_stop_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	 * kthread_flush_worker will block until all work is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	 * If the reason that stop_queue timed out is that the work will never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	 * finish, then it does no good to call flush/stop thread, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	 * return anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		dev_err(&ctlr->dev, "problem destroying queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	kthread_destroy_worker(ctlr->kworker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static int __spi_queued_transfer(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 				 struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 				 bool need_pump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	spin_lock_irqsave(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	if (!ctlr->running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	msg->actual_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	msg->status = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	list_add_tail(&msg->queue, &ctlr->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (!ctlr->busy && need_pump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)  * spi_queued_transfer - transfer function for queued transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)  * @spi: spi device which is requesting transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)  * @msg: spi message which is to handled is queued to driver queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	return __spi_queued_transfer(spi, msg, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) static int spi_controller_initialize_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	ctlr->transfer = spi_queued_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (!ctlr->transfer_one_message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		ctlr->transfer_one_message = spi_transfer_one_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	/* Initialize and start queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	ret = spi_init_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		dev_err(&ctlr->dev, "problem initializing queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		goto err_init_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	ctlr->queued = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	ret = spi_start_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		dev_err(&ctlr->dev, "problem starting queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		goto err_start_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) err_start_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	spi_destroy_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) err_init_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)  * spi_flush_queue - Send all pending messages in the queue from the callers'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  *		     context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)  * @ctlr: controller to process queue for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  * This should be used when one wants to ensure all pending messages have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * sent before doing something. Is used by the spi-mem code to make sure SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  * memory operations do not preempt regular SPI transfers that have been queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  * before the spi-mem operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) void spi_flush_queue(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	if (ctlr->transfer == spi_queued_transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		__spi_pump_messages(ctlr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) #if defined(CONFIG_OF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			   struct device_node *nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	/* Mode (clock phase/polarity/etc.) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	if (of_property_read_bool(nc, "spi-cpha"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		spi->mode |= SPI_CPHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (of_property_read_bool(nc, "spi-cpol"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		spi->mode |= SPI_CPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	if (of_property_read_bool(nc, "spi-3wire"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		spi->mode |= SPI_3WIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (of_property_read_bool(nc, "spi-lsb-first"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		spi->mode |= SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (of_property_read_bool(nc, "spi-cs-high"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		spi->mode |= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	/* Device DUAL/QUAD mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			spi->mode |= SPI_TX_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			spi->mode |= SPI_TX_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			spi->mode |= SPI_TX_OCTAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			dev_warn(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 				"spi-tx-bus-width %d not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 				value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			spi->mode |= SPI_RX_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			spi->mode |= SPI_RX_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			spi->mode |= SPI_RX_OCTAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			dev_warn(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 				"spi-rx-bus-width %d not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 				value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	if (spi_controller_is_slave(ctlr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		if (!of_node_name_eq(nc, "slave")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 				nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	/* Device address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	rc = of_property_read_u32(nc, "reg", &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			nc, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	spi->chip_select = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	/* Device speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		spi->max_speed_hz = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static struct spi_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	/* Alloc an spi_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	spi = spi_alloc_device(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	if (!spi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	/* Select device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	rc = of_modalias_node(nc, spi->modalias,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 				sizeof(spi->modalias));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	rc = of_spi_parse_dt(ctlr, spi, nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	/* Store a pointer to the node in the device structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	of_node_get(nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	spi->dev.of_node = nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	spi->dev.fwnode = of_fwnode_handle(nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	/* Register the new device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	rc = spi_add_device(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		goto err_of_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	return spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) err_of_node_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	of_node_put(nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	spi_dev_put(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)  * of_register_spi_devices() - Register child devices onto the SPI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * @ctlr:	Pointer to spi_controller device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  * Registers an spi_device for each child node of controller node which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * represents a valid SPI slave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static void of_register_spi_devices(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	struct device_node *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if (!ctlr->dev.of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		spi = of_register_spi_device(ctlr, nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		if (IS_ERR(spi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			dev_warn(&ctlr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 				 "Failed to create SPI device for %pOF\n", nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 			of_node_clear_flag(nc, OF_POPULATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static void of_register_spi_devices(struct spi_controller *ctlr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) struct acpi_spi_lookup {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	struct spi_controller 	*ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	u32			max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	u32			mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	int			irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	u8			bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	u8			chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 					    struct acpi_spi_lookup *lookup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	const union acpi_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (!x86_apple_machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	    && obj->buffer.length >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	    && obj->buffer.length == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		lookup->mode |= SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		lookup->mode |= SPI_CPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		lookup->mode |= SPI_CPHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	struct acpi_spi_lookup *lookup = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	struct spi_controller *ctlr = lookup->ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		struct acpi_resource_spi_serialbus *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		acpi_handle parent_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		sb = &ares->data.spi_serial_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 			status = acpi_get_handle(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 						 sb->resource_source.string_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 						 &parent_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 			if (ACPI_FAILURE(status) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			 * ACPI DeviceSelection numbering is handled by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			 * host controller driver in Windows and can vary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			 * from driver to driver. In Linux we always expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 			 * 0 .. max - 1 so we need to ask the driver to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			 * translate between the two schemes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			if (ctlr->fw_translate_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 				int cs = ctlr->fw_translate_cs(ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 						sb->device_selection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 				if (cs < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 					return cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 				lookup->chip_select = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 				lookup->chip_select = sb->device_selection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			lookup->max_speed_hz = sb->connection_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			lookup->bits_per_word = sb->data_bit_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				lookup->mode |= SPI_CPHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				lookup->mode |= SPI_CPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 				lookup->mode |= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	} else if (lookup->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		struct resource r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		if (acpi_dev_resource_interrupt(ares, 0, &r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 			lookup->irq = r.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	/* Always tell the ACPI core to skip this resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 					    struct acpi_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	acpi_handle parent_handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	struct list_head resource_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	struct acpi_spi_lookup lookup = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	if (acpi_bus_get_status(adev) || !adev->status.present ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	    acpi_device_enumerated(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	lookup.ctlr		= ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	lookup.irq		= -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	INIT_LIST_HEAD(&resource_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	ret = acpi_dev_get_resources(adev, &resource_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 				     acpi_spi_add_resource, &lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	acpi_dev_free_resource_list(&resource_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		/* found SPI in _CRS but it points to another controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	if (!lookup.max_speed_hz &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	    !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		/* Apple does not use _CRS but nested devices for SPI slaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		acpi_spi_parse_apple_properties(adev, &lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (!lookup.max_speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	spi = spi_alloc_device(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	if (!spi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 			dev_name(&adev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		return AE_NO_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	ACPI_COMPANION_SET(&spi->dev, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	spi->max_speed_hz	= lookup.max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	spi->mode		|= lookup.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	spi->irq		= lookup.irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	spi->bits_per_word	= lookup.bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	spi->chip_select	= lookup.chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 			  sizeof(spi->modalias));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	if (spi->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	acpi_device_set_enumerated(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	adev->power.flags.ignore_parent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	if (spi_add_device(spi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		adev->power.flags.ignore_parent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 			dev_name(&adev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		spi_dev_put(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 				       void *data, void **return_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	struct spi_controller *ctlr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct acpi_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	if (acpi_bus_get_device(handle, &adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	return acpi_register_spi_device(ctlr, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static void acpi_register_spi_devices(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	acpi_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	handle = ACPI_HANDLE(ctlr->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 				     acpi_spi_add_device, NULL, ctlr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) #endif /* CONFIG_ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static void spi_controller_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	ctlr = container_of(dev, struct spi_controller, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	kfree(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) static struct class spi_master_class = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	.name		= "spi_master",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	.dev_release	= spi_controller_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	.dev_groups	= spi_master_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) #ifdef CONFIG_SPI_SLAVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  *		     controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * @spi: device used for the current transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) int spi_slave_abort(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		return ctlr->slave_abort(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) EXPORT_SYMBOL_GPL(spi_slave_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) static int match_true(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			  char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 						   dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	struct device *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	child = device_find_child(&ctlr->dev, NULL, match_true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	return sprintf(buf, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		       child ? to_spi_device(child)->modalias : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			   const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 						   dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	struct device *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	char name[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	rc = sscanf(buf, "%31s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	if (rc != 1 || !name[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	child = device_find_child(&ctlr->dev, NULL, match_true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	if (child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		/* Remove registered slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		device_unregister(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		put_device(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	if (strcmp(name, "(null)")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		/* Register new slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		spi = spi_alloc_device(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		if (!spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		strlcpy(spi->modalias, name, sizeof(spi->modalias));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		rc = spi_add_device(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 			spi_dev_put(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static DEVICE_ATTR_RW(slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) static struct attribute *spi_slave_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	&dev_attr_slave.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static const struct attribute_group spi_slave_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	.attrs = spi_slave_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static const struct attribute_group *spi_slave_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	&spi_controller_statistics_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	&spi_slave_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static struct class spi_slave_class = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	.name		= "spi_slave",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	.dev_release	= spi_controller_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	.dev_groups	= spi_slave_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) extern struct class spi_slave_class;	/* dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)  * __spi_alloc_controller - allocate an SPI master or slave controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)  * @dev: the controller, possibly using the platform_bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)  * @size: how much zeroed driver-private data to allocate; the pointer to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)  *	memory is in the driver_data field of the returned device, accessible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)  *	drivers granting DMA access to portions of their private data need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)  *	slave (true) controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)  * This call is used only by SPI controller drivers, which are the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)  * only ones directly touching chip registers.  It's how they allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)  * an spi_controller structure, prior to calling spi_register_controller().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)  * This must be called from context that can sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)  * The caller is responsible for assigning the bus number and initializing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)  * controller's methods before calling spi_register_controller(); and (after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)  * errors adding the device) calling spi_controller_put() to prevent a memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)  * leak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)  * Return: the SPI controller structure on success, else NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) struct spi_controller *__spi_alloc_controller(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 					      unsigned int size, bool slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	struct spi_controller	*ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	if (!ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	device_initialize(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	ctlr->bus_num = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	ctlr->num_chipselect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	ctlr->slave = slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		ctlr->dev.class = &spi_slave_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		ctlr->dev.class = &spi_master_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	ctlr->dev.parent = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	pm_suspend_ignore_children(&ctlr->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	return ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) EXPORT_SYMBOL_GPL(__spi_alloc_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) static void devm_spi_release_controller(struct device *dev, void *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	spi_controller_put(*(struct spi_controller **)ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)  * @dev: physical device of SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)  * @size: how much zeroed driver-private data to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)  * Allocate an SPI controller and automatically release a reference on it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)  * when @dev is unbound from its driver.  Drivers are thus relieved from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)  * having to call spi_controller_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)  * The arguments to this function are identical to __spi_alloc_controller().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)  * Return: the SPI controller structure on success, else NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 						   unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 						   bool slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	struct spi_controller **ptr, *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	ctlr = __spi_alloc_controller(dev, size, slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	if (ctlr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		*ptr = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	return ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	int nb, i, *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	struct device_node *np = ctlr->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	nb = of_gpio_named_count(np, "cs-gpios");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	/* Return error only for an incorrectly formed cs-gpios property */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	if (nb == 0 || nb == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	else if (nb < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		return nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 			  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	ctlr->cs_gpios = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	if (!ctlr->cs_gpios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	for (i = 0; i < ctlr->num_chipselect; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		cs[i] = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	for (i = 0; i < nb; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)  * spi_get_gpio_descs() - grab chip select GPIOs for the master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)  * @ctlr: The SPI master to grab GPIO descriptors for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) static int spi_get_gpio_descs(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	int nb, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	struct gpio_desc **cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	struct device *dev = &ctlr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	unsigned long native_cs_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	unsigned int num_cs_gpios = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	nb = gpiod_count(dev, "cs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	/* No GPIOs at all is fine, else return the error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	if (nb == 0 || nb == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	else if (nb < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		return nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	if (!cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	ctlr->cs_gpiods = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	for (i = 0; i < nb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		 * Most chipselects are active low, the inverted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		 * semantics are handled by special quirks in gpiolib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		 * so initializing them GPIOD_OUT_LOW here means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		 * "unasserted", in most cases this will drive the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		 * line high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 						      GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		if (IS_ERR(cs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 			return PTR_ERR(cs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		if (cs[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 			 * If we find a CS GPIO, name it after the device and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			 * chip select line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			char *gpioname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 						  dev_name(dev), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			if (!gpioname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 			gpiod_set_consumer_name(cs[i], gpioname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 			num_cs_gpios++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			dev_err(dev, "Invalid native chip select %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		native_cs_mask |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		dev_err(dev, "No unused native chip select available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) static int spi_controller_check_ops(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	 * The controller may implement only the high-level SPI-memory like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	 * operations if it does not support regular SPI transfers, and this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	 * valid use case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	 * If ->mem_ops is NULL, we request that at least one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	 * ->transfer_xxx() method be implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (ctlr->mem_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		if (!ctlr->mem_ops->exec_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	} else if (!ctlr->transfer && !ctlr->transfer_one &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		   !ctlr->transfer_one_message) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)  * spi_register_controller - register SPI master or slave controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)  * @ctlr: initialized master, originally from spi_alloc_master() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)  *	spi_alloc_slave()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * SPI controllers connect to their drivers using some non-SPI bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  * such as the platform bus.  The final stage of probe() in that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  * includes calling spi_register_controller() to hook up to this SPI bus glue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)  * SPI controllers use board specific (often SOC specific) bus numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)  * and board-specific addressing for SPI devices combines those numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)  * with chip select numbers.  Since SPI does not directly support dynamic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)  * device identification, boards need configuration tables telling which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)  * chip is at which address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)  * This must be called from context that can sleep.  It returns zero on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)  * success, else a negative error code (dropping the controller's refcount).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)  * After a successful return, the caller is responsible for calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)  * spi_unregister_controller().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) int spi_register_controller(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	struct device		*dev = ctlr->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	struct boardinfo	*bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	int			id, first_dynamic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 * Make sure all necessary hooks are implemented before registering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	 * the SPI controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	status = spi_controller_check_ops(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	if (ctlr->bus_num >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		/* devices with a fixed bus num must check-in with the num */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 			ctlr->bus_num + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		if (WARN(id < 0, "couldn't get idr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 			return id == -ENOSPC ? -EBUSY : id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		ctlr->bus_num = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	} else if (ctlr->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		/* allocate dynamic bus number using Linux idr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		id = of_alias_get_id(ctlr->dev.of_node, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		if (id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 			ctlr->bus_num = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 				       ctlr->bus_num + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			if (WARN(id < 0, "couldn't get idr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 				return id == -ENOSPC ? -EBUSY : id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	if (ctlr->bus_num < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		first_dynamic = of_alias_get_highest_id("spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		if (first_dynamic < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 			first_dynamic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			first_dynamic++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			       0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		if (WARN(id < 0, "couldn't get idr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		ctlr->bus_num = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	INIT_LIST_HEAD(&ctlr->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	spin_lock_init(&ctlr->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	spin_lock_init(&ctlr->bus_lock_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	mutex_init(&ctlr->bus_lock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	mutex_init(&ctlr->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	ctlr->bus_lock_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	init_completion(&ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	if (!ctlr->max_dma_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		ctlr->max_dma_len = INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	/* register the device, then userspace will see it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	 * registration fails if the bus ID is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	if (!spi_controller_is_slave(ctlr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		if (ctlr->use_gpio_descriptors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 			status = spi_get_gpio_descs(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 			if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 				goto free_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			 * A controller using GPIO descriptors always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			 * supports SPI_CS_HIGH if need be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			ctlr->mode_bits |= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 			/* Legacy code path for GPIOs from DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			status = of_spi_get_gpio_numbers(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 			if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 				goto free_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	 * Even if it's just one always-selected device, there must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	 * be at least one chipselect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	if (!ctlr->num_chipselect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		goto free_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	status = device_add(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		goto free_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	dev_dbg(dev, "registered %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 			spi_controller_is_slave(ctlr) ? "slave" : "master",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 			dev_name(&ctlr->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	 * If we're using a queued driver, start the queue. Note that we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	 * need the queueing logic if the driver is only supporting high-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	 * memory operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	if (ctlr->transfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		dev_info(dev, "controller is unqueued, this is deprecated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		status = spi_controller_initialize_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 			device_del(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 			goto free_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	/* add statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	spin_lock_init(&ctlr->statistics.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	list_add_tail(&ctlr->list, &spi_controller_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	list_for_each_entry(bi, &board_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	/* Register devices from the device tree and ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	of_register_spi_devices(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	acpi_register_spi_devices(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) free_bus_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	idr_remove(&spi_master_idr, ctlr->bus_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) EXPORT_SYMBOL_GPL(spi_register_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) static void devm_spi_unregister(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	spi_unregister_controller(*(struct spi_controller **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)  * devm_spi_register_controller - register managed SPI master or slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)  *	controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)  * @dev:    device managing SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)  * @ctlr: initialized controller, originally from spi_alloc_master() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)  *	spi_alloc_slave()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)  * Register a SPI device as with spi_register_controller() which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)  * automatically be unregistered and freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) int devm_spi_register_controller(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 				 struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	struct spi_controller **ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	ret = spi_register_controller(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		*ptr = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) EXPORT_SYMBOL_GPL(devm_spi_register_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	return *(struct spi_controller **)res == ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) static int __unregister(struct device *dev, void *null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	spi_unregister_device(to_spi_device(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)  * spi_unregister_controller - unregister SPI master or slave controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)  * @ctlr: the controller being unregistered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)  * This call is used only by SPI controller drivers, which are the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)  * only ones directly touching chip registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)  * This must be called from context that can sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)  * Note that this function also drops a reference to the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) void spi_unregister_controller(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	struct spi_controller *found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	int id = ctlr->bus_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	/* Prevent addition of new devices, unregister existing ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		mutex_lock(&spi_add_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	device_for_each_child(&ctlr->dev, NULL, __unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	/* First make sure that this controller was ever added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	found = idr_find(&spi_master_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	if (ctlr->queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		if (spi_destroy_queue(ctlr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 			dev_err(&ctlr->dev, "queue remove failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	list_del(&ctlr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	device_del(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	/* Release the last reference on the controller if its driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	 * has not yet been converted to devm_spi_alloc_master/slave().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			 devm_spi_match_controller, ctlr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		put_device(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	/* free bus id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	mutex_lock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	if (found == ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		idr_remove(&spi_master_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	mutex_unlock(&board_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		mutex_unlock(&spi_add_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) EXPORT_SYMBOL_GPL(spi_unregister_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) int spi_controller_suspend(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	/* Basically no-ops for non-queued controllers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (!ctlr->queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	ret = spi_stop_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		dev_err(&ctlr->dev, "queue stop failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) EXPORT_SYMBOL_GPL(spi_controller_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) int spi_controller_resume(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	if (!ctlr->queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	ret = spi_start_queue(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		dev_err(&ctlr->dev, "queue restart failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) EXPORT_SYMBOL_GPL(spi_controller_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static int __spi_controller_match(struct device *dev, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	const u16 *bus_num = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	ctlr = container_of(dev, struct spi_controller, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	return ctlr->bus_num == *bus_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)  * spi_busnum_to_master - look up master associated with bus_num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)  * @bus_num: the master's bus number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)  * This call may be used with devices that are registered after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)  * arch init time.  It returns a refcounted pointer to the relevant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)  * spi_controller (which the caller must release), or NULL if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)  * no such master registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)  * Return: the SPI master structure on success, else NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) struct spi_controller *spi_busnum_to_master(u16 bus_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	struct device		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	struct spi_controller	*ctlr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 				__spi_controller_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		ctlr = container_of(dev, struct spi_controller, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	/* reference got in class_find_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	return ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) EXPORT_SYMBOL_GPL(spi_busnum_to_master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) /* Core methods for SPI resource management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)  * spi_res_alloc - allocate a spi resource that is life-cycle managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)  *                 during the processing of a spi_message while using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)  *                 spi_transfer_one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)  * @spi:     the spi device for which we allocate memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)  * @release: the release code to execute for this resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)  * @size:    size to alloc and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)  * @gfp:     GFP allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)  * Return: the pointer to the allocated data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)  * This may get enhanced in the future to allocate from a memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)  * of the @spi_device or @spi_controller to avoid repeated allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) void *spi_res_alloc(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		    spi_res_release_t release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		    size_t size, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	struct spi_res *sres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	sres = kzalloc(sizeof(*sres) + size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (!sres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	INIT_LIST_HEAD(&sres->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	sres->release = release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	return sres->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) EXPORT_SYMBOL_GPL(spi_res_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)  * spi_res_free - free an spi resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)  * @res: pointer to the custom data of a resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) void spi_res_free(void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	struct spi_res *sres = container_of(res, struct spi_res, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	WARN_ON(!list_empty(&sres->entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	kfree(sres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) EXPORT_SYMBOL_GPL(spi_res_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)  * spi_res_add - add a spi_res to the spi_message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)  * @message: the spi message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)  * @res:     the spi_resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) void spi_res_add(struct spi_message *message, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	struct spi_res *sres = container_of(res, struct spi_res, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	WARN_ON(!list_empty(&sres->entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	list_add_tail(&sres->entry, &message->resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) EXPORT_SYMBOL_GPL(spi_res_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)  * spi_res_release - release all spi resources for this message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)  * @ctlr:  the @spi_controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)  * @message: the @spi_message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	struct spi_res *res, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		if (res->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 			res->release(ctlr, message, res->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		list_del(&res->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) EXPORT_SYMBOL_GPL(spi_res_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) /* Core methods for spi_message alterations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) static void __spi_replace_transfers_release(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 					    struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 					    void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	struct spi_replaced_transfers *rxfer = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	/* call extra callback if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	if (rxfer->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		rxfer->release(ctlr, msg, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	/* insert replaced transfers back into the message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	/* remove the formerly inserted entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	for (i = 0; i < rxfer->inserted; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 		list_del(&rxfer->inserted_transfers[i].transfer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)  * spi_replace_transfers - replace transfers with several transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  *                         and register change with spi_message.resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)  * @msg:           the spi_message we work upon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)  * @xfer_first:    the first spi_transfer we want to replace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)  * @remove:        number of transfers to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)  * @insert:        the number of transfers we want to insert instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  * @release:       extra release code necessary in some circumstances
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  * @extradatasize: extra data to allocate (with alignment guarantees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)  *                 of struct @spi_transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)  * @gfp:           gfp flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)  * Returns: pointer to @spi_replaced_transfers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)  *          PTR_ERR(...) in case of errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) struct spi_replaced_transfers *spi_replace_transfers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	struct spi_transfer *xfer_first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	size_t remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	size_t insert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	spi_replaced_release_t release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	size_t extradatasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	struct spi_replaced_transfers *rxfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	/* allocate the structure using spi_res */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 			      struct_size(rxfer, inserted_transfers, insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 			      + extradatasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 			      gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	if (!rxfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	/* the release code to invoke before running the generic release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	rxfer->release = release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	/* assign extradata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	if (extradatasize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		rxfer->extradata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 			&rxfer->inserted_transfers[insert];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	/* init the replaced_transfers list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	/* assign the list_entry after which we should reinsert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	 * the @replaced_transfers - it may be spi_message.messages!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	rxfer->replaced_after = xfer_first->transfer_list.prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	/* remove the requested number of transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	for (i = 0; i < remove; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		/* if the entry after replaced_after it is msg->transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		 * then we have been requested to remove more transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		 * than are in the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		if (rxfer->replaced_after->next == &msg->transfers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			dev_err(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 				"requested to remove more spi_transfers than are available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 			/* insert replaced transfers back into the message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			list_splice(&rxfer->replaced_transfers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 				    rxfer->replaced_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 			/* free the spi_replace_transfer structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 			spi_res_free(rxfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			/* and return with an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		/* remove the entry after replaced_after from list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		 * transfers and add it to list of replaced_transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		list_move_tail(rxfer->replaced_after->next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 			       &rxfer->replaced_transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	/* create copy of the given xfer with identical settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	 * based on the first transfer to get removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	for (i = 0; i < insert; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		/* we need to run in reverse order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		/* copy all spi_transfer data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		memcpy(xfer, xfer_first, sizeof(*xfer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		/* add to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		list_add(&xfer->transfer_list, rxfer->replaced_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		/* clear cs_change and delay for all but the last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 			xfer->cs_change = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 			xfer->delay_usecs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 			xfer->delay.value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	/* set up inserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	rxfer->inserted = insert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	/* and register it with spi_res/spi_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	spi_res_add(msg, rxfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	return rxfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) EXPORT_SYMBOL_GPL(spi_replace_transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 					struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 					struct spi_transfer **xferp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 					size_t maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 					gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	struct spi_transfer *xfer = *xferp, *xfers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	struct spi_replaced_transfers *srt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	size_t count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	/* calculate how many we have to replace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	count = DIV_ROUND_UP(xfer->len, maxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	/* create replacement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	if (IS_ERR(srt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 		return PTR_ERR(srt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	xfers = srt->inserted_transfers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	/* now handle each of those newly inserted spi_transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	 * note that the replacements spi_transfers all are preset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	 * to the same values as *xferp, so tx_buf, rx_buf and len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	 * are all identical (as well as most others)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	 * so we just have to fix up len and the pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	 * this also includes support for the depreciated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	 * spi_message.is_dma_mapped interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	/* the first transfer just needs the length modified, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	 * run it outside the loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	/* all the others need rx_buf/tx_buf also set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		/* update rx_buf, tx_buf and dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		if (xfers[i].rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 			xfers[i].rx_buf += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		if (xfers[i].rx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 			xfers[i].rx_dma += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		if (xfers[i].tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 			xfers[i].tx_buf += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		if (xfers[i].tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 			xfers[i].tx_dma += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		/* update length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		xfers[i].len = min(maxsize, xfers[i].len - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	/* we set up xferp to the last entry we have inserted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	 * so that we skip those already split transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	*xferp = &xfers[count - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	/* increment statistics counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 				       transfers_split_maxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 				       transfers_split_maxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)  * spi_split_tranfers_maxsize - split spi transfers into multiple transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)  *                              when an individual transfer exceeds a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)  *                              certain size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)  * @ctlr:    the @spi_controller for this transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)  * @msg:   the @spi_message to transform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  * @maxsize:  the maximum when to apply this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)  * @gfp: GFP allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)  * Return: status of transformation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) int spi_split_transfers_maxsize(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 				struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 				size_t maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 				gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	/* iterate over the transfer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	 * but note that xfer is advanced to the last transfer inserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	 * to avoid checking sizes again unnecessarily (also xfer does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	 * potentiall belong to a different list by the time the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	 * replacement has happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		if (xfer->len > maxsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 							   maxsize, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /* Core methods for SPI controller protocol drivers.  Some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)  * other core methods are currently defined as inline functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 					u8 bits_per_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	if (ctlr->bits_per_word_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		/* Only 32 bits fit in the mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		if (bits_per_word > 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)  * spi_setup - setup SPI mode and clock rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)  * @spi: the device whose settings are being modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)  * Context: can sleep, and no requests are queued to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)  * SPI protocol drivers may need to update the transfer mode if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)  * device doesn't work with its default.  They may likewise need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)  * to update clock rates or word sizes from initial values.  This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)  * changes those settings, and must be called from a context that can sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)  * effect the next time the device is selected and data is transferred to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)  * or from it.  When this function returns, the spi device is deselected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)  * Note that this call will fail if the protocol driver specifies an option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)  * that the underlying controller or its driver does not support.  For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)  * example, not all hardware supports wire transfers using nine bit words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)  * LSB-first wire encoding, or active-high chipselects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) int spi_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	unsigned	bad_bits, ugly_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	int		status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	/* check mode to prevent that DUAL and QUAD set at the same time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		"setup: can not select dual and quad at the same time\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	/* help drivers fail *cleanly* when they need options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	 * that aren't supported with their current controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	 * SPI_CS_WORD has a fallback software implementation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	 * so it is ignored here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	/* nothing prevents from working with active-high CS in case if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	 * is driven by GPIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	if (gpio_is_valid(spi->cs_gpio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		bad_bits &= ~SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	ugly_bits = bad_bits &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	if (ugly_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 		dev_warn(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 			 "setup: ignoring unsupported mode bits %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 			 ugly_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 		spi->mode &= ~ugly_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		bad_bits &= ~ugly_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	if (bad_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			bad_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	if (!spi->bits_per_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		spi->bits_per_word = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	status = __spi_validate_bits_per_word(spi->controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 					      spi->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	if (!spi->max_speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		spi->max_speed_hz = spi->controller->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	mutex_lock(&spi->controller->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	if (spi->controller->setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		status = spi->controller->setup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		status = pm_runtime_get_sync(spi->controller->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			mutex_unlock(&spi->controller->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 			pm_runtime_put_noidle(spi->controller->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 			return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 		 * We do not want to return positive value from pm_runtime_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		 * there are many instances of devices calling spi_setup() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		 * checking for a non-zero return value instead of a negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		 * return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 		spi_set_cs(spi, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		spi_set_cs(spi, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	mutex_unlock(&spi->controller->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	if (spi->rt && !spi->controller->rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 		spi->controller->rt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		spi_set_thread_rt(spi->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 			spi->bits_per_word, spi->max_speed_hz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) EXPORT_SYMBOL_GPL(spi_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)  * @spi: the device that requires specific CS timing configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * @setup: CS setup time specified via @spi_delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)  * @hold: CS hold time specified via @spi_delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)  * @inactive: CS inactive delay between transfers specified via @spi_delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		      struct spi_delay *hold, struct spi_delay *inactive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	if (spi->controller->set_cs_timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		return spi->controller->set_cs_timing(spi, setup, hold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 						      inactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	    (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	    (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 			"Clock-cycle delays for CS not supported in SW mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	len = sizeof(struct spi_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	/* copy delays to controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	if (setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 		memcpy(&spi->controller->cs_setup, setup, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		memset(&spi->controller->cs_setup, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	if (hold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 		memcpy(&spi->controller->cs_hold, hold, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		memset(&spi->controller->cs_hold, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	if (inactive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 		memcpy(&spi->controller->cs_inactive, inactive, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 		memset(&spi->controller->cs_inactive, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) EXPORT_SYMBOL_GPL(spi_set_cs_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 				       struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	int delay1, delay2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	if (delay1 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		return delay1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	if (delay2 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		return delay2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	if (delay1 < delay2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		memcpy(&xfer->word_delay, &spi->word_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		       sizeof(xfer->word_delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) static int __spi_validate(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	int w_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	if (list_empty(&message->transfers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	/* If an SPI controller does not support toggling the CS line on each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	 * for the CS line, we can emulate the CS-per-word hardware function by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	 * splitting transfers into one-word transfers and ensuring that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	 * cs_change is set for each transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 					  spi->cs_gpiod ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 					  gpio_is_valid(spi->cs_gpio))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		size_t maxsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		maxsize = (spi->bits_per_word + 7) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		/* spi_split_transfers_maxsize() requires message->spi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 		message->spi = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 						  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 			/* don't change cs_change on the last entry in the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 			if (list_is_last(&xfer->transfer_list, &message->transfers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 			xfer->cs_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	/* Half-duplex links include original MicroWire, and ones with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	 * only one data pin like SPI_3WIRE (switches direction) or where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	 * either MOSI or MISO is missing.  They can also be caused by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	 * software limitations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	    (spi->mode & SPI_3WIRE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 		unsigned flags = ctlr->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 			if (xfer->rx_buf && xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	 * Set transfer bits_per_word and max speed as spi device default if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	 * it is not set for this transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	 * Set transfer tx_nbits and rx_nbits as single transfer default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	 * Ensure transfer word_delay is at least as long as that required by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	 * device itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	message->frame_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 		xfer->effective_speed_hz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		message->frame_length += xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		if (!xfer->bits_per_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 			xfer->bits_per_word = spi->bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		if (!xfer->speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 			xfer->speed_hz = spi->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 			xfer->speed_hz = ctlr->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		 * SPI transfer length should be multiple of SPI word size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 		 * where SPI word size should be power-of-two multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 		if (xfer->bits_per_word <= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 			w_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		else if (xfer->bits_per_word <= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 			w_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 			w_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 		/* No partial transfers accepted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		if (xfer->len % w_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		if (xfer->speed_hz && ctlr->min_speed_hz &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 		    xfer->speed_hz < ctlr->min_speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 		if (xfer->tx_buf && !xfer->tx_nbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 			xfer->tx_nbits = SPI_NBITS_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 		if (xfer->rx_buf && !xfer->rx_nbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 			xfer->rx_nbits = SPI_NBITS_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		/* check transfer tx/rx_nbits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 		 * 1. check the value matches one of single, dual and quad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		 * 2. check tx/rx_nbits match the mode in spi_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		if (xfer->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 				xfer->tx_nbits != SPI_NBITS_DUAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 				xfer->tx_nbits != SPI_NBITS_QUAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 				!(spi->mode & SPI_TX_QUAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		/* check transfer rx_nbits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 		if (xfer->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 				xfer->rx_nbits != SPI_NBITS_DUAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 				xfer->rx_nbits != SPI_NBITS_QUAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 				!(spi->mode & SPI_RX_QUAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		if (_spi_xfer_word_delay_update(xfer, spi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	message->status = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) static int __spi_async(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	 * Some controllers do not support doing regular SPI transfers. Return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	 * ENOTSUPP when this is the case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	if (!ctlr->transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	message->spi = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	trace_spi_message_submit(message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	if (!ctlr->ptp_sts_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 			xfer->ptp_sts_word_pre = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 			ptp_read_system_prets(xfer->ptp_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	return ctlr->transfer(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)  * spi_async - asynchronous SPI transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)  * @spi: device with which data will be exchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)  * @message: describes the data transfers, including completion callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)  * Context: any (irqs may be blocked, etc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)  * This call may be used in_irq and other contexts which can't sleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)  * as well as from task contexts which can sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)  * The completion callback is invoked in a context which can't sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)  * Before that invocation, the value of message->status is undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)  * When the callback is issued, message->status holds either zero (to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)  * indicate complete success) or a negative error code.  After that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)  * callback returns, the driver which issued the transfer request may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)  * deallocate the associated memory; it's no longer in use by any SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)  * core or controller driver code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)  * Note that although all messages to a spi_device are handled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)  * FIFO order, messages may go to different devices in other orders.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711)  * Some device might be higher priority, or have various "hard" access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)  * time requirements, for example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)  * On detection of any fault during the transfer, processing of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715)  * the entire message is aborted, and the device is deselected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)  * Until returning from the associated message completion callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)  * no other spi_message queued to that device will be processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)  * (This rule applies equally to all the synchronous transfer calls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)  * which are wrappers around this core asynchronous primitive.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) int spi_async(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	ret = __spi_validate(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	if (ctlr->bus_lock_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		ret = __spi_async(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) EXPORT_SYMBOL_GPL(spi_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747)  * spi_async_locked - version of spi_async with exclusive bus usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)  * @spi: device with which data will be exchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749)  * @message: describes the data transfers, including completion callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)  * Context: any (irqs may be blocked, etc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)  * This call may be used in_irq and other contexts which can't sleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)  * as well as from task contexts which can sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)  * The completion callback is invoked in a context which can't sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)  * Before that invocation, the value of message->status is undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)  * When the callback is issued, message->status holds either zero (to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)  * indicate complete success) or a negative error code.  After that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)  * callback returns, the driver which issued the transfer request may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)  * deallocate the associated memory; it's no longer in use by any SPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)  * core or controller driver code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)  * Note that although all messages to a spi_device are handled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)  * FIFO order, messages may go to different devices in other orders.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)  * Some device might be higher priority, or have various "hard" access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)  * time requirements, for example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)  * On detection of any fault during the transfer, processing of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)  * the entire message is aborted, and the device is deselected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)  * Until returning from the associated message completion callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)  * no other spi_message queued to that device will be processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772)  * (This rule applies equally to all the synchronous transfer calls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)  * which are wrappers around this core asynchronous primitive.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) int spi_async_locked(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	ret = __spi_validate(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	ret = __spi_async(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) EXPORT_SYMBOL_GPL(spi_async_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) /* Utility methods for SPI protocol drivers, layered on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)  * top of the core.  Some other utility methods are defined as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)  * inline functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) static void spi_complete(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	complete(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) static int __spi_sync(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	DECLARE_COMPLETION_ONSTACK(done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	status = __spi_validate(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	message->complete = spi_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	message->context = &done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 	message->spi = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	/* If we're not using the legacy transfer method then we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	 * try to transfer in the calling context so special case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	 * This code would be less tricky if we could remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	 * support for driver implemented message queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	if (ctlr->transfer == spi_queued_transfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 		trace_spi_message_submit(message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 		status = __spi_queued_transfer(spi, message, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		status = spi_async_locked(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	if (status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 		/* Push out the messages in the calling context if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 		 * can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 		if (ctlr->transfer == spi_queued_transfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 						       spi_sync_immediate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 						       spi_sync_immediate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			__spi_pump_messages(ctlr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 		wait_for_completion(&done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		status = message->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	message->context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)  * spi_sync - blocking/synchronous SPI data transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)  * @spi: device with which data will be exchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867)  * @message: describes the data transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)  * This call may only be used from a context that may sleep.  The sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)  * is non-interruptible, and has no timeout.  Low-overhead controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)  * drivers may DMA directly into and out of the message buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)  * Note that the SPI device's chip select is active during the message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)  * and then is normally disabled between messages.  Drivers for some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)  * frequently-used devices may want to minimize costs of selecting a chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)  * by leaving it selected in anticipation that the next message will go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)  * to the same chip.  (That may increase power usage.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)  * Also, the caller is guaranteeing that the memory associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)  * message will not be freed before this call returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) int spi_sync(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	mutex_lock(&spi->controller->bus_lock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 	ret = __spi_sync(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 	mutex_unlock(&spi->controller->bus_lock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) EXPORT_SYMBOL_GPL(spi_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)  * spi_sync_locked - version of spi_sync with exclusive bus usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899)  * @spi: device with which data will be exchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900)  * @message: describes the data transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)  * This call may only be used from a context that may sleep.  The sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)  * is non-interruptible, and has no timeout.  Low-overhead controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)  * drivers may DMA directly into and out of the message buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907)  * This call should be used by drivers that require exclusive access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)  * be released by a spi_bus_unlock call when the exclusive access is over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	return __spi_sync(spi, message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) EXPORT_SYMBOL_GPL(spi_sync_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)  * @ctlr: SPI bus master that should be locked for exclusive bus access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)  * This call may only be used from a context that may sleep.  The sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)  * is non-interruptible, and has no timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)  * This call should be used by drivers that require exclusive access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929)  * exclusive access is over. Data transfer must be done by spi_sync_locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)  * and spi_async_locked calls when the SPI bus lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)  * Return: always zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) int spi_bus_lock(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 	mutex_lock(&ctlr->bus_lock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	ctlr->bus_lock_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	/* mutex remains locked until spi_bus_unlock is called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) EXPORT_SYMBOL_GPL(spi_bus_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)  * spi_bus_unlock - release the lock for exclusive SPI bus usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)  * @ctlr: SPI bus master that was locked for exclusive bus access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)  * This call may only be used from a context that may sleep.  The sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)  * is non-interruptible, and has no timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)  * call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)  * Return: always zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) int spi_bus_unlock(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	ctlr->bus_lock_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	mutex_unlock(&ctlr->bus_lock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) EXPORT_SYMBOL_GPL(spi_bus_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) /* portable code must never pass more than 32 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) static u8	*buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)  * spi_write_then_read - SPI synchronous write followed by read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)  * @spi: device with which data will be exchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981)  * @txbuf: data to be written (need not be dma-safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)  * @n_tx: size of txbuf, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)  * @rxbuf: buffer into which data will be read (need not be dma-safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)  * @n_rx: size of rxbuf, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)  * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)  * This performs a half duplex MicroWire style transaction with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)  * device, sending txbuf and then reading rxbuf.  The return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)  * is zero for success, else a negative errno status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)  * This call may only be used from a context that may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)  * Parameters to this routine are always copied using a small buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)  * Performance-sensitive or bulk transfer code should instead use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)  * spi_{async,sync}() calls with dma-safe buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)  * Return: zero on success, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) int spi_write_then_read(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 		const void *txbuf, unsigned n_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		void *rxbuf, unsigned n_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	static DEFINE_MUTEX(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	struct spi_message	message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	struct spi_transfer	x[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 	u8			*local_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	 * copying here, (as a pure convenience thing), but we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	 * keep heap costs out of the hot path unless someone else is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	 * using the pre-allocated buffer or the transfer is too large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 				    GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 		if (!local_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 		local_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 	spi_message_init(&message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	memset(x, 0, sizeof(x));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	if (n_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		x[0].len = n_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 		spi_message_add_tail(&x[0], &message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	if (n_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		x[1].len = n_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 		spi_message_add_tail(&x[1], &message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 	memcpy(local_buf, txbuf, n_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	x[0].tx_buf = local_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 	x[1].rx_buf = local_buf + n_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	/* do the i/o */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	status = spi_sync(spi, &message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 		memcpy(rxbuf, x[1].rx_buf, n_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	if (x[0].tx_buf == buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 		mutex_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 		kfree(local_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) EXPORT_SYMBOL_GPL(spi_write_then_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) #if IS_ENABLED(CONFIG_OF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) /* must call put_device() when done with returned spi_device device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) struct spi_device *of_find_spi_device_by_node(struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	return dev ? to_spi_device(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) #endif /* IS_ENABLED(CONFIG_OF) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) #if IS_ENABLED(CONFIG_OF_DYNAMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) /* the spi controllers are not using spi_bus, so we find it with another way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	dev = class_find_device_by_of_node(&spi_master_class, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 		dev = class_find_device_by_of_node(&spi_slave_class, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	/* reference got in class_find_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	return container_of(dev, struct spi_controller, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) static int of_spi_notify(struct notifier_block *nb, unsigned long action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 			 void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	struct of_reconfig_data *rd = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 	struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	switch (of_reconfig_get_state_change(action, arg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 	case OF_RECONFIG_CHANGE_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 		if (ctlr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 			return NOTIFY_OK;	/* not for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 			put_device(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 			return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 		spi = of_register_spi_device(ctlr, rd->dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 		put_device(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 		if (IS_ERR(spi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 			pr_err("%s: failed to create for '%pOF'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 					__func__, rd->dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 			of_node_clear_flag(rd->dn, OF_POPULATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 			return notifier_from_errno(PTR_ERR(spi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	case OF_RECONFIG_CHANGE_REMOVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 		/* already depopulated? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 			return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 		/* find our device by node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		spi = of_find_spi_device_by_node(rd->dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 		if (spi == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			return NOTIFY_OK;	/* no? not meant for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 		/* unregister takes one ref away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		spi_unregister_device(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 		/* and put the reference of the find */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 		put_device(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) static struct notifier_block spi_of_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	.notifier_call = of_spi_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) extern struct notifier_block spi_of_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) #if IS_ENABLED(CONFIG_ACPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) static int spi_acpi_controller_match(struct device *dev, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	return ACPI_COMPANION(dev->parent) == data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	dev = class_find_device(&spi_master_class, NULL, adev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 				spi_acpi_controller_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 		dev = class_find_device(&spi_slave_class, NULL, adev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 					spi_acpi_controller_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	return container_of(dev, struct spi_controller, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	return to_spi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 			   void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	struct acpi_device *adev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	struct spi_device *spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	case ACPI_RECONFIG_DEVICE_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 		if (!ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 		acpi_register_spi_device(ctlr, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 		put_device(&ctlr->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	case ACPI_RECONFIG_DEVICE_REMOVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 		if (!acpi_device_enumerated(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 		spi = acpi_spi_find_device_by_adev(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 		if (!spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		spi_unregister_device(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 		put_device(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) static struct notifier_block spi_acpi_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	.notifier_call = acpi_spi_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) extern struct notifier_block spi_acpi_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) static int __init spi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 	int	status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 		status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 		goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	status = bus_register(&spi_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	status = class_register(&spi_master_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		status = class_register(&spi_slave_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 			goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 	if (IS_ENABLED(CONFIG_ACPI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	class_unregister(&spi_master_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	bus_unregister(&spi_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) /* board_info is normally registered in arch_initcall(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)  * but even essential drivers wait till later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251)  * REVISIT only boardinfo really needs static linking. the rest (device and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)  * driver registration) _could_ be dynamically linked (modular) ... costs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)  * include needing to have boardinfo data structures be much more public.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) postcore_initcall(spi_init);