^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * drivers/w1/masters/omap_hdq.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2007,2012 Texas Instruments, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * This file is licensed under the terms of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * version 2. This program is licensed "as is" without any warranty of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * kind, whether express or implied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/w1.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define MOD_NAME "OMAP_HDQ:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define OMAP_HDQ_REVISION 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define OMAP_HDQ_TX_DATA 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define OMAP_HDQ_RX_DATA 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define OMAP_HDQ_CTRL_STATUS 0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define OMAP_HDQ_CTRL_STATUS_SINGLE BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define OMAP_HDQ_CTRL_STATUS_GO BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define OMAP_HDQ_CTRL_STATUS_PRESENCE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define OMAP_HDQ_CTRL_STATUS_DIR BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define OMAP_HDQ_INT_STATUS 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define OMAP_HDQ_FLAG_CLEAR 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define OMAP_HDQ_FLAG_SET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define OMAP_HDQ_TIMEOUT (HZ/5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define OMAP_HDQ_MAX_USER 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static int w1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) module_param(w1_id, int, S_IRUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/* Per-controller state, shared between the IRQ handler and transfer paths */
struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;	/* ioremapped HDQ register block */
	/* lock read/write/break operations */
	struct mutex hdq_mutex;
	/* interrupt status and a lock for it */
	u8 hdq_irqstatus;	/* OMAP_HDQ_INT_STATUS bits accumulated by hdq_isr() */
	spinlock_t hdq_spinlock;
	/* mode: 0-HDQ 1-W1 */
	int mode;

};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* HDQ register I/O routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return __raw_readl(hdq_data->hdq_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) __raw_writel(val, hdq_data->hdq_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) u8 val, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) | (val & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) __raw_writel(new_val, hdq_data->hdq_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Wait for one or more bits in flag change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * HDQ_FLAG_SET: wait until any bit in the flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * return 0 on success and -ETIMEDOUT in the case of timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u8 flag, u8 flag_set, u8 *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* wait for the flag clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) && time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (*status & flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) } else if (flag_set == OMAP_HDQ_FLAG_SET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) /* wait for the flag set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) && time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) if (!(*status & flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Clear saved irqstatus after using an interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) status = hdq_data->hdq_irqstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) /* this is a read-modify-write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) hdq_data->hdq_irqstatus &= ~bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
/*
 * write out a byte and fill *status with HDQ_INT_STATUS
 *
 * Returns 0 on success, -EINTR if the mutex wait was interrupted,
 * -ETIMEDOUT when no TXCOMPLETE interrupt arrived in time.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;

	/* serialize against concurrent read/write/break operations */
	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* stale bits here mean a previous transfer did not consume its irq */
	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	*status = 0;

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	/* consume TXCOMPLETE from the saved irq status whether we timed out or not */
	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x\n", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x\n", tmp_status);
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /* HDQ Interrupt service routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static irqreturn_t hdq_isr(int irq, void *_hdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct hdq_data *hdq_data = _hdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (hdq_data->hdq_irqstatus &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /* wake up sleeping process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) wake_up(&hdq_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) /* W1 search callback function in HDQ mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) u8 search_type, w1_slave_found_callback slave_found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) u64 module_id, rn_le, cs, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (w1_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) module_id = w1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) module_id = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) rn_le = cpu_to_le64(module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * HDQ might not obey truly the 1-wire spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * So calculate CRC based on module parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) cs = w1_calc_crc8((u8 *)&rn_le, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) id = (cs << 56) | module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) slave_found(master_dev, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /* Issue break pulse to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static int omap_hdq_break(struct hdq_data *hdq_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u8 tmp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) goto rtn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (hdq_data->hdq_irqstatus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) hdq_data->hdq_irqstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) /* set the INIT and GO bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) OMAP_HDQ_CTRL_STATUS_GO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) /* wait for the TIMEOUT bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) ret = wait_event_timeout(hdq_wait_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) OMAP_HDQ_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) dev_dbg(hdq_data->dev, "break wait elapsed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) /* check irqstatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) tmp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * check for the presence detect bit to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * set to show that the slave is responding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) dev_dbg(hdq_data->dev, "Presence bit not set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * wait for both INIT and GO bits rerurn to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * zero wait time expected for interrupt mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) &tmp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) " return to zero, %x\n", tmp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) mutex_unlock(&hdq_data->hdq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) rtn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
/*
 * Read one byte from the bus into *val.
 *
 * Returns 0 on success, -EINTR if the mutex wait was interrupted,
 * -EINVAL when the device is runtime-suspended (registers unclocked),
 * -ETIMEDOUT when no RXCOMPLETE interrupt arrived in time.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* register access below requires the device to be powered up */
	if (pm_runtime_suspended(hdq_data->dev)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		/* start a receive cycle: DIR (read) plus GO */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
			(hdq_data->hdq_irqstatus
			& (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
			   OMAP_HDQ_INT_STATUS_TIMEOUT)),
			OMAP_HDQ_TIMEOUT);
		/* consume both RXCOMPLETE and TIMEOUT from the saved status */
		status = hdq_reset_irqstatus(hdq_data,
					     OMAP_HDQ_INT_STATUS_RXCOMPLETE |
					     OMAP_HDQ_INT_STATUS_TIMEOUT);
		/* restore write direction for subsequent transfers */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);

		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	} else { /* interrupt had occurred before hdq_read_byte was called */
		hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * W1 triplet callback function - used for searching ROM addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * Registered only when controller is in 1-wire mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static u8 omap_w1_triplet(void *_hdq, u8 bdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) u8 id_bit, comp_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) u8 ret = 0x3; /* no slaves responded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct hdq_data *hdq_data = _hdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) err = pm_runtime_get_sync(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) pm_runtime_put_noidle(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) goto rtn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) /* read id_bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) err = wait_event_timeout(hdq_wait_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) (hdq_data->hdq_irqstatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) OMAP_HDQ_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /* Must clear irqstatus for another RXCOMPLETE interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) dev_dbg(hdq_data->dev, "RX wait elapsed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /* read comp_bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) err = wait_event_timeout(hdq_wait_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) (hdq_data->hdq_irqstatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) OMAP_HDQ_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) /* Must clear irqstatus for another RXCOMPLETE interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) dev_dbg(hdq_data->dev, "RX wait elapsed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (id_bit && comp_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) ret = 0x03; /* no slaves responded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (!id_bit && !comp_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) /* Both bits are valid, take the direction given */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) ret = bdir ? 0x04 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) /* Only one bit is valid, take that direction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) bdir = id_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) ret = id_bit ? 0x05 : 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /* write bdir bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) err = wait_event_timeout(hdq_wait_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) (hdq_data->hdq_irqstatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) OMAP_HDQ_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* Must clear irqstatus for another TXCOMPLETE interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) dev_dbg(hdq_data->dev, "TX wait elapsed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) OMAP_HDQ_CTRL_STATUS_SINGLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) mutex_unlock(&hdq_data->hdq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) rtn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) pm_runtime_mark_last_busy(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) pm_runtime_put_autosuspend(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* reset callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static u8 omap_w1_reset_bus(void *_hdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct hdq_data *hdq_data = _hdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) err = pm_runtime_get_sync(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) pm_runtime_put_noidle(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) omap_hdq_break(hdq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) pm_runtime_mark_last_busy(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) pm_runtime_put_autosuspend(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /* Read a byte of data from the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static u8 omap_w1_read_byte(void *_hdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) struct hdq_data *hdq_data = _hdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) u8 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ret = pm_runtime_get_sync(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) pm_runtime_put_noidle(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) ret = hdq_read_byte(hdq_data, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) val = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) pm_runtime_mark_last_busy(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) pm_runtime_put_autosuspend(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Write a byte of data to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static void omap_w1_write_byte(void *_hdq, u8 byte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct hdq_data *hdq_data = _hdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) ret = pm_runtime_get_sync(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) pm_runtime_put_noidle(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * We need to reset the slave before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * issuing the SKIP ROM command, else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * the slave will not work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (byte == W1_SKIP_ROM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) omap_hdq_break(hdq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ret = hdq_write_byte(hdq_data, byte, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) pm_runtime_mark_last_busy(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) pm_runtime_put_autosuspend(hdq_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) static struct w1_bus_master omap_w1_master = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) .read_byte = omap_w1_read_byte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) .write_byte = omap_w1_write_byte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) .reset_bus = omap_w1_reset_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct hdq_data *hdq_data = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) hdq_reg_out(hdq_data, 0, hdq_data->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct hdq_data *hdq_data = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /* select HDQ/1W mode & enable clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) hdq_data->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static const struct dev_pm_ops omap_hdq_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) omap_hdq_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) static int omap_hdq_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) struct hdq_data *hdq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u8 rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) const char *mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (!hdq_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) dev_dbg(&pdev->dev, "unable to allocate memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) hdq_data->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) platform_set_drvdata(pdev, hdq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (IS_ERR(hdq_data->hdq_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return PTR_ERR(hdq_data->hdq_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) mutex_init(&hdq_data->hdq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (ret < 0 || !strcmp(mode, "hdq")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) hdq_data->mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) omap_w1_master.search = omap_w1_search_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) hdq_data->mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) omap_w1_master.triplet = omap_w1_triplet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) ret = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) goto err_w1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) spin_lock_init(&hdq_data->hdq_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) ret = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) dev_dbg(&pdev->dev, "could not request irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) omap_hdq_break(hdq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) pm_runtime_mark_last_busy(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) pm_runtime_put_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) omap_w1_master.data = hdq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) ret = w1_add_master_device(&omap_w1_master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) goto err_w1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) err_w1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) pm_runtime_dont_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static int omap_hdq_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) active = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (active < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) w1_remove_master_device(&omap_w1_master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) pm_runtime_dont_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (active >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static const struct of_device_id omap_hdq_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) { .compatible = "ti,omap3-1w" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) { .compatible = "ti,am4372-hdq" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static struct platform_driver omap_hdq_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) .probe = omap_hdq_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) .remove = omap_hdq_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) .name = "omap_hdq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) .of_match_table = omap_hdq_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) .pm = &omap_hdq_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) module_platform_driver(omap_hdq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) MODULE_AUTHOR("Texas Instruments");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) MODULE_DESCRIPTION("HDQ-1W driver Library");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) MODULE_LICENSE("GPL");