// SPDX-License-Identifier: GPL-2.0
/*
 * This file is part of STM32 ADC driver
 *
 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
 * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/timer/stm32-lptim-trigger.h>
#include <linux/iio/timer/stm32-timer-trigger.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "stm32-adc-core.h"

/* Number of linear calibration shadow registers / LINCALRDYW control bits */
#define STM32H7_LINCALFACT_NUM		6

/* BOOST bit must be set on STM32H7 when ADC clock is above 20MHz */
#define STM32H7_BOOST_CLKRATE		20000000UL

#define STM32_ADC_CH_MAX		20	/* max number of channels */
#define STM32_ADC_CH_SZ			10	/* max channel name size */
#define STM32_ADC_MAX_SQ		16	/* SQ1..SQ16 */
#define STM32_ADC_MAX_SMP		7	/* SMPx range is [0..7] */
#define STM32_ADC_TIMEOUT_US		100000
#define STM32_ADC_TIMEOUT	(msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
#define STM32_ADC_HW_STOP_DELAY_MS	100

#define STM32_DMA_BUFFER_SIZE		PAGE_SIZE

/* External trigger enable */
enum stm32_adc_exten {
	STM32_EXTEN_SWTRIG,
	STM32_EXTEN_HWTRIG_RISING_EDGE,
	STM32_EXTEN_HWTRIG_FALLING_EDGE,
	STM32_EXTEN_HWTRIG_BOTH_EDGES,
};

/* extsel - trigger mux selection value */
enum stm32_adc_extsel {
	STM32_EXT0,
	STM32_EXT1,
	STM32_EXT2,
	STM32_EXT3,
	STM32_EXT4,
	STM32_EXT5,
	STM32_EXT6,
	STM32_EXT7,
	STM32_EXT8,
	STM32_EXT9,
	STM32_EXT10,
	STM32_EXT11,
	STM32_EXT12,
	STM32_EXT13,
	STM32_EXT14,
	STM32_EXT15,
	STM32_EXT16,
	STM32_EXT17,
	STM32_EXT18,
	STM32_EXT19,
	STM32_EXT20,
};

/**
 * struct stm32_adc_trig_info - ADC trigger info
 * @name: name of the trigger, corresponding to its source
 * @extsel: trigger selection
 */
struct stm32_adc_trig_info {
	const char *name;
	enum stm32_adc_extsel extsel;
};

/**
 * struct stm32_adc_calib - optional adc calibration data
 * @calfact_s: Calibration offset for single ended channels
 * @calfact_d: Calibration offset for differential channels
 * @lincalfact: Linearity calibration factor
 * @calibrated: Indicates calibration status
 */
struct stm32_adc_calib {
	u32 calfact_s;
	u32 calfact_d;
	u32 lincalfact[STM32H7_LINCALFACT_NUM];
	bool calibrated;
};

/**
 * struct stm32_adc_regs - stm32 ADC misc registers & bitfield desc
 * @reg: register offset
 * @mask: bitfield mask
 * @shift: left shift
 */
struct stm32_adc_regs {
	int reg;
	int mask;
	int shift;
};

/**
 * struct stm32_adc_regspec - stm32 registers definition
 * @dr: data register offset
 * @ier_eoc: interrupt enable register & eocie bitfield
 * @ier_ovr: interrupt enable register & overrun bitfield
 * @isr_eoc: interrupt status register & eoc bitfield
 * @isr_ovr: interrupt status register & overrun bitfield
 * @sqr: reference to sequence registers array
 * @exten: trigger control register & bitfield
 * @extsel: trigger selection register & bitfield
 * @res: resolution selection register & bitfield
 * @smpr: smpr1 & smpr2 registers offset array
 * @smp_bits: smpr1 & smpr2 index and bitfields
 */
struct stm32_adc_regspec {
	const u32 dr;
	const struct stm32_adc_regs ier_eoc;
	const struct stm32_adc_regs ier_ovr;
	const struct stm32_adc_regs isr_eoc;
	const struct stm32_adc_regs isr_ovr;
	const struct stm32_adc_regs *sqr;
	const struct stm32_adc_regs exten;
	const struct stm32_adc_regs extsel;
	const struct stm32_adc_regs res;
	const u32 smpr[2];
	const struct stm32_adc_regs *smp_bits;
};

struct stm32_adc;

/**
 * struct stm32_adc_cfg - stm32 compatible configuration data
 * @regs: registers descriptions
 * @adc_info: per instance input channels definitions
 * @trigs: external trigger sources
 * @clk_required: clock is required
 * @has_vregready: vregready status flag presence
 * @prepare: optional prepare routine (power-up, enable)
 * @start_conv: routine to start conversions
 * @stop_conv: routine to stop conversions
 * @unprepare: optional unprepare routine (disable, power-down)
 * @irq_clear: routine to clear irqs
 * @smp_cycles: programmable sampling time (ADC clock cycles)
 */
struct stm32_adc_cfg {
	const struct stm32_adc_regspec *regs;
	const struct stm32_adc_info *adc_info;
	struct stm32_adc_trig_info *trigs;
	bool clk_required;
	bool has_vregready;
	int (*prepare)(struct iio_dev *);
	void (*start_conv)(struct iio_dev *, bool dma);
	void (*stop_conv)(struct iio_dev *);
	void (*unprepare)(struct iio_dev *);
	void (*irq_clear)(struct iio_dev *indio_dev, u32 msk);
	const unsigned int *smp_cycles;
};

/**
 * struct stm32_adc - private data of each ADC IIO instance
 * @common: reference to ADC block common data
 * @offset: ADC instance register offset in ADC block
 * @cfg: compatible configuration data
 * @completion: end of single conversion completion
 * @buffer: data buffer
 * @clk: clock for this adc instance
 * @irq: interrupt for this adc instance
 * @lock: spinlock
 * @bufi: data buffer index
 * @num_conv: expected number of scan conversions
 * @res: data resolution (e.g. RES bitfield value)
 * @trigger_polarity: external trigger polarity (e.g. exten)
 * @dma_chan: dma channel
 * @rx_buf: dma rx buffer cpu address
 * @rx_dma_buf: dma rx buffer bus address
 * @rx_buf_sz: dma rx buffer size
 * @difsel: bitmask to set single-ended/differential channel
 * @pcsel: bitmask to preselect channels on some devices
 * @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
 * @cal: optional calibration data on some devices
 * @chan_name: channel name array
 */
struct stm32_adc {
	struct stm32_adc_common *common;
	u32 offset;
	const struct stm32_adc_cfg *cfg;
	struct completion completion;
	u16 buffer[STM32_ADC_MAX_SQ];
	struct clk *clk;
	int irq;
	spinlock_t lock;	/* interrupt lock */
	unsigned int bufi;
	unsigned int num_conv;
	u32 res;
	u32 trigger_polarity;
	struct dma_chan *dma_chan;
	u8 *rx_buf;
	dma_addr_t rx_dma_buf;
	unsigned int rx_buf_sz;
	u32 difsel;
	u32 pcsel;
	u32 smpr_val[2];
	struct stm32_adc_calib cal;
	char chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
};

struct stm32_adc_diff_channel {
	u32 vinp;
	u32 vinn;
};

/**
 * struct stm32_adc_info - stm32 ADC, per instance config data
 * @max_channels: Maximum number of channels
 * @resolutions: available resolutions
 * @num_res: number of available resolutions
 */
struct stm32_adc_info {
	int max_channels;
	const unsigned int *resolutions;
	const unsigned int num_res;
};

static const unsigned int stm32f4_adc_resolutions[] = {
	/* sorted values so the index matches RES[1:0] in STM32F4_ADC_CR1 */
	12, 10, 8, 6,
};

/* stm32f4 can have up to 16 channels */
static const struct stm32_adc_info stm32f4_adc_info = {
	.max_channels = 16,
	.resolutions = stm32f4_adc_resolutions,
	.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
};

static const unsigned int stm32h7_adc_resolutions[] = {
	/* sorted values so the index matches RES[2:0] in STM32H7_ADC_CFGR */
	16, 14, 12, 10, 8,
};

/* stm32h7 can have up to 20 channels */
static const struct stm32_adc_info stm32h7_adc_info = {
	.max_channels = STM32_ADC_CH_MAX,
	.resolutions = stm32h7_adc_resolutions,
	.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
};

/*
 * stm32f4_sq - describe regular sequence registers
 * - L: sequence len (register & bit field)
 * - SQ1..SQ16: sequence entries (register & bit field)
 */
static const struct stm32_adc_regs stm32f4_sq[STM32_ADC_MAX_SQ + 1] = {
	/* L: len bit field description to be kept as first element */
	{ STM32F4_ADC_SQR1, GENMASK(23, 20), 20 },
	/* SQ1..SQ16 registers & bit fields (reg, mask, shift) */
	{ STM32F4_ADC_SQR3, GENMASK(4, 0), 0 },
	{ STM32F4_ADC_SQR3, GENMASK(9, 5), 5 },
	{ STM32F4_ADC_SQR3, GENMASK(14, 10), 10 },
	{ STM32F4_ADC_SQR3, GENMASK(19, 15), 15 },
	{ STM32F4_ADC_SQR3, GENMASK(24, 20), 20 },
	{ STM32F4_ADC_SQR3, GENMASK(29, 25), 25 },
	{ STM32F4_ADC_SQR2, GENMASK(4, 0), 0 },
	{ STM32F4_ADC_SQR2, GENMASK(9, 5), 5 },
	{ STM32F4_ADC_SQR2, GENMASK(14, 10), 10 },
	{ STM32F4_ADC_SQR2, GENMASK(19, 15), 15 },
	{ STM32F4_ADC_SQR2, GENMASK(24, 20), 20 },
	{ STM32F4_ADC_SQR2, GENMASK(29, 25), 25 },
	{ STM32F4_ADC_SQR1, GENMASK(4, 0), 0 },
	{ STM32F4_ADC_SQR1, GENMASK(9, 5), 5 },
	{ STM32F4_ADC_SQR1, GENMASK(14, 10), 10 },
	{ STM32F4_ADC_SQR1, GENMASK(19, 15), 15 },
};
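
/*
 * Note: the (reg, mask, shift) triplets above are generic bitfield
 * descriptors. The sequence configuration code later in the driver typically
 * consumes them as a read-modify-write, e.g.:
 *   val = (val & ~sqr[i].mask) | (chan << sqr[i].shift);
 */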

/* STM32F4 external trigger sources for all instances */
static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
	{ TIM1_CH1, STM32_EXT0 },
	{ TIM1_CH2, STM32_EXT1 },
	{ TIM1_CH3, STM32_EXT2 },
	{ TIM2_CH2, STM32_EXT3 },
	{ TIM2_CH3, STM32_EXT4 },
	{ TIM2_CH4, STM32_EXT5 },
	{ TIM2_TRGO, STM32_EXT6 },
	{ TIM3_CH1, STM32_EXT7 },
	{ TIM3_TRGO, STM32_EXT8 },
	{ TIM4_CH4, STM32_EXT9 },
	{ TIM5_CH1, STM32_EXT10 },
	{ TIM5_CH2, STM32_EXT11 },
	{ TIM5_CH3, STM32_EXT12 },
	{ TIM8_CH1, STM32_EXT13 },
	{ TIM8_TRGO, STM32_EXT14 },
	{}, /* sentinel */
};

/*
 * stm32f4_smp_bits[] - describe sampling time register index & bit fields
 * Sorted so it can be indexed by channel number.
 */
static const struct stm32_adc_regs stm32f4_smp_bits[] = {
	/* STM32F4_ADC_SMPR2: smpr[] index, mask, shift for SMP0 to SMP9 */
	{ 1, GENMASK(2, 0), 0 },
	{ 1, GENMASK(5, 3), 3 },
	{ 1, GENMASK(8, 6), 6 },
	{ 1, GENMASK(11, 9), 9 },
	{ 1, GENMASK(14, 12), 12 },
	{ 1, GENMASK(17, 15), 15 },
	{ 1, GENMASK(20, 18), 18 },
	{ 1, GENMASK(23, 21), 21 },
	{ 1, GENMASK(26, 24), 24 },
	{ 1, GENMASK(29, 27), 27 },
	/* STM32F4_ADC_SMPR1, smpr[] index, mask, shift for SMP10 to SMP18 */
	{ 0, GENMASK(2, 0), 0 },
	{ 0, GENMASK(5, 3), 3 },
	{ 0, GENMASK(8, 6), 6 },
	{ 0, GENMASK(11, 9), 9 },
	{ 0, GENMASK(14, 12), 12 },
	{ 0, GENMASK(17, 15), 15 },
	{ 0, GENMASK(20, 18), 18 },
	{ 0, GENMASK(23, 21), 21 },
	{ 0, GENMASK(26, 24), 24 },
};

/* STM32F4 programmable sampling time (ADC clock cycles) */
static const unsigned int stm32f4_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
	3, 15, 28, 56, 84, 112, 144, 480,
};

static const struct stm32_adc_regspec stm32f4_adc_regspec = {
	.dr = STM32F4_ADC_DR,
	.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
	.ier_ovr = { STM32F4_ADC_CR1, STM32F4_OVRIE },
	.isr_eoc = { STM32F4_ADC_SR, STM32F4_EOC },
	.isr_ovr = { STM32F4_ADC_SR, STM32F4_OVR },
	.sqr = stm32f4_sq,
	.exten = { STM32F4_ADC_CR2, STM32F4_EXTEN_MASK, STM32F4_EXTEN_SHIFT },
	.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
		    STM32F4_EXTSEL_SHIFT },
	.res = { STM32F4_ADC_CR1, STM32F4_RES_MASK, STM32F4_RES_SHIFT },
	.smpr = { STM32F4_ADC_SMPR1, STM32F4_ADC_SMPR2 },
	.smp_bits = stm32f4_smp_bits,
};

static const struct stm32_adc_regs stm32h7_sq[STM32_ADC_MAX_SQ + 1] = {
	/* L: len bit field description to be kept as first element */
	{ STM32H7_ADC_SQR1, GENMASK(3, 0), 0 },
	/* SQ1..SQ16 registers & bit fields (reg, mask, shift) */
	{ STM32H7_ADC_SQR1, GENMASK(10, 6), 6 },
	{ STM32H7_ADC_SQR1, GENMASK(16, 12), 12 },
	{ STM32H7_ADC_SQR1, GENMASK(22, 18), 18 },
	{ STM32H7_ADC_SQR1, GENMASK(28, 24), 24 },
	{ STM32H7_ADC_SQR2, GENMASK(4, 0), 0 },
	{ STM32H7_ADC_SQR2, GENMASK(10, 6), 6 },
	{ STM32H7_ADC_SQR2, GENMASK(16, 12), 12 },
	{ STM32H7_ADC_SQR2, GENMASK(22, 18), 18 },
	{ STM32H7_ADC_SQR2, GENMASK(28, 24), 24 },
	{ STM32H7_ADC_SQR3, GENMASK(4, 0), 0 },
	{ STM32H7_ADC_SQR3, GENMASK(10, 6), 6 },
	{ STM32H7_ADC_SQR3, GENMASK(16, 12), 12 },
	{ STM32H7_ADC_SQR3, GENMASK(22, 18), 18 },
	{ STM32H7_ADC_SQR3, GENMASK(28, 24), 24 },
	{ STM32H7_ADC_SQR4, GENMASK(4, 0), 0 },
	{ STM32H7_ADC_SQR4, GENMASK(10, 6), 6 },
};

/* STM32H7 external trigger sources for all instances */
static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
	{ TIM1_CH1, STM32_EXT0 },
	{ TIM1_CH2, STM32_EXT1 },
	{ TIM1_CH3, STM32_EXT2 },
	{ TIM2_CH2, STM32_EXT3 },
	{ TIM3_TRGO, STM32_EXT4 },
	{ TIM4_CH4, STM32_EXT5 },
	{ TIM8_TRGO, STM32_EXT7 },
	{ TIM8_TRGO2, STM32_EXT8 },
	{ TIM1_TRGO, STM32_EXT9 },
	{ TIM1_TRGO2, STM32_EXT10 },
	{ TIM2_TRGO, STM32_EXT11 },
	{ TIM4_TRGO, STM32_EXT12 },
	{ TIM6_TRGO, STM32_EXT13 },
	{ TIM15_TRGO, STM32_EXT14 },
	{ TIM3_CH4, STM32_EXT15 },
	{ LPTIM1_OUT, STM32_EXT18 },
	{ LPTIM2_OUT, STM32_EXT19 },
	{ LPTIM3_OUT, STM32_EXT20 },
	{},
};

/*
 * stm32h7_smp_bits - describe sampling time register index & bit fields
 * Sorted so it can be indexed by channel number.
 */
static const struct stm32_adc_regs stm32h7_smp_bits[] = {
	/* STM32H7_ADC_SMPR1, smpr[] index, mask, shift for SMP0 to SMP9 */
	{ 0, GENMASK(2, 0), 0 },
	{ 0, GENMASK(5, 3), 3 },
	{ 0, GENMASK(8, 6), 6 },
	{ 0, GENMASK(11, 9), 9 },
	{ 0, GENMASK(14, 12), 12 },
	{ 0, GENMASK(17, 15), 15 },
	{ 0, GENMASK(20, 18), 18 },
	{ 0, GENMASK(23, 21), 21 },
	{ 0, GENMASK(26, 24), 24 },
	{ 0, GENMASK(29, 27), 27 },
	/* STM32H7_ADC_SMPR2, smpr[] index, mask, shift for SMP10 to SMP19 */
	{ 1, GENMASK(2, 0), 0 },
	{ 1, GENMASK(5, 3), 3 },
	{ 1, GENMASK(8, 6), 6 },
	{ 1, GENMASK(11, 9), 9 },
	{ 1, GENMASK(14, 12), 12 },
	{ 1, GENMASK(17, 15), 15 },
	{ 1, GENMASK(20, 18), 18 },
	{ 1, GENMASK(23, 21), 21 },
	{ 1, GENMASK(26, 24), 24 },
	{ 1, GENMASK(29, 27), 27 },
};

/* STM32H7 programmable sampling time (ADC clock cycles, rounded down) */
static const unsigned int stm32h7_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
	1, 2, 8, 16, 32, 64, 387, 810,
};

static const struct stm32_adc_regspec stm32h7_adc_regspec = {
	.dr = STM32H7_ADC_DR,
	.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
	.ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE },
	.isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
	.isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR },
	.sqr = stm32h7_sq,
	.exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
	.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
		    STM32H7_EXTSEL_SHIFT },
	.res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT },
	.smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 },
	.smp_bits = stm32h7_smp_bits,
};

/*
 * STM32 ADC registers access routines
 * @adc: stm32 adc instance
 * @reg: reg offset in adc instance
 *
 * Note: All instances share same base, with 0x0, 0x100 or 0x200 offset resp.
 * for adc1, adc2 and adc3.
 */
static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
{
	return readl_relaxed(adc->common->base + adc->offset + reg);
}

#define stm32_adc_readl_addr(addr)	stm32_adc_readl(adc, addr)

#define stm32_adc_readl_poll_timeout(reg, val, cond, sleep_us, timeout_us) \
	readx_poll_timeout(stm32_adc_readl_addr, reg, val, \
			   cond, sleep_us, timeout_us)
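
/*
 * Note: readx_poll_timeout() expects a single-argument accessor, hence the
 * stm32_adc_readl_addr() wrapper above: it picks up the local 'adc' variable
 * from the caller's scope, so the poll helper may only be used in functions
 * that have a 'struct stm32_adc *adc' in scope.
 */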

static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
{
	return readw_relaxed(adc->common->base + adc->offset + reg);
}

static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
{
	writel_relaxed(val, adc->common->base + adc->offset + reg);
}

static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
{
	unsigned long flags;

	spin_lock_irqsave(&adc->lock, flags);
	stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
	spin_unlock_irqrestore(&adc->lock, flags);
}

static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
{
	unsigned long flags;

	spin_lock_irqsave(&adc->lock, flags);
	stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
	spin_unlock_irqrestore(&adc->lock, flags);
}
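
/*
 * stm32_adc_set_bits() / stm32_adc_clr_bits() do their read-modify-write
 * under the per-instance spinlock with interrupts disabled, so bitfield
 * updates issued from interrupt context and from process context cannot
 * race with each other.
 */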

/**
 * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
 * @adc: stm32 adc instance
 */
static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
{
	stm32_adc_set_bits(adc, adc->cfg->regs->ier_eoc.reg,
			   adc->cfg->regs->ier_eoc.mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * @adc: stm32 adc instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) stm32_adc_clr_bits(adc, adc->cfg->regs->ier_eoc.reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) adc->cfg->regs->ier_eoc.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static void stm32_adc_ovr_irq_enable(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) stm32_adc_set_bits(adc, adc->cfg->regs->ier_ovr.reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) adc->cfg->regs->ier_ovr.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) static void stm32_adc_ovr_irq_disable(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) stm32_adc_clr_bits(adc, adc->cfg->regs->ier_ovr.reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) adc->cfg->regs->ier_ovr.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) static void stm32_adc_set_res(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) const struct stm32_adc_regs *res = &adc->cfg->regs->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) val = stm32_adc_readl(adc, res->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) val = (val & ~res->mask) | (adc->res << res->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) stm32_adc_writel(adc, res->reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) static int stm32_adc_hw_stop(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) struct iio_dev *indio_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (adc->cfg->unprepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) adc->cfg->unprepare(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (adc->clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) clk_disable_unprepare(adc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) static int stm32_adc_hw_start(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct iio_dev *indio_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (adc->clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) ret = clk_prepare_enable(adc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) stm32_adc_set_res(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) if (adc->cfg->prepare) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) ret = adc->cfg->prepare(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) goto err_clk_dis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) err_clk_dis:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) if (adc->clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) clk_disable_unprepare(adc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * stm32f4_adc_start_conv() - Start conversions for regular channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * @indio_dev: IIO device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * @dma: use dma to transfer conversion result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) * Start conversions for regular channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * Also take care of normal or DMA mode. Circular DMA may be used for regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) * conversions, in IIO buffer modes. Otherwise, use ADC interrupt with direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * DR read instead (e.g. read_raw, or triggered buffer mode without DMA).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static void stm32f4_adc_start_conv(struct iio_dev *indio_dev, bool dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) stm32_adc_set_bits(adc, STM32F4_ADC_CR2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) STM32F4_DMA | STM32F4_DDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /* Wait for Power-up time (tSTAB from datasheet) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) usleep_range(2, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /* Software start ? (e.g. trigger detection disabled ?) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static void stm32f4_adc_stop_conv(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) stm32_adc_clr_bits(adc, STM32F4_ADC_CR2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) STM32F4_ADON | STM32F4_DMA | STM32F4_DDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static void stm32f4_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) stm32_adc_clr_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) static void stm32h7_adc_start_conv(struct iio_dev *indio_dev, bool dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) enum stm32h7_adc_dmngt dmngt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) dmngt = STM32H7_DMNGT_DMA_CIRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dmngt = STM32H7_DMNGT_DR_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) spin_lock_irqsave(&adc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) val = stm32_adc_readl(adc, STM32H7_ADC_CFGR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) val = (val & ~STM32H7_DMNGT_MASK) | (dmngt << STM32H7_DMNGT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) stm32_adc_writel(adc, STM32H7_ADC_CFGR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) spin_unlock_irqrestore(&adc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) static void stm32h7_adc_stop_conv(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) !(val & (STM32H7_ADSTART)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 100, STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) dev_warn(&indio_dev->dev, "stop failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static void stm32h7_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /* On STM32H7 IRQs are cleared by writing 1 into ISR register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) stm32_adc_set_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* Exit deep power down, then enable ADC voltage regulator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADVREGEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (adc->common->rate > STM32H7_BOOST_CLKRATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* Wait for startup time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (!adc->cfg->has_vregready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) usleep_range(10, 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_ISR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) val & STM32MP1_VREGREADY, 100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) dev_err(&indio_dev->dev, "Failed to exit power down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static void stm32h7_adc_enter_pwr_down(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* Setting DEEPPWD disables ADC vreg and clears ADVREGEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) static int stm32h7_adc_enable(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* Poll for ADRDY to be set (after adc startup time) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_ISR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) val & STM32H7_ADRDY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 100, STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) dev_err(&indio_dev->dev, "Failed to enable ADC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /* Clear ADRDY by writing one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static void stm32h7_adc_disable(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /* Disable ADC and wait until it's effectively disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) !(val & STM32H7_ADEN), 100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) dev_warn(&indio_dev->dev, "Failed to disable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * stm32h7_adc_read_selfcalib() - read calibration shadow regs, save result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * @indio_dev: IIO device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * Note: Must be called once ADC is enabled, so LINCALRDYW[1..6] are writable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static int stm32h7_adc_read_selfcalib(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) u32 lincalrdyw_mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* Read linearity calibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) lincalrdyw_mask = STM32H7_LINCALRDYW6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) for (i = STM32H7_LINCALFACT_NUM - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* Clear STM32H7_LINCALRDYW[6..1]: transfer calib to CALFACT2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* Poll: wait calib data to be ready in CALFACT2 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) !(val & lincalrdyw_mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 100, STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) dev_err(&indio_dev->dev, "Failed to read calfact\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) adc->cal.lincalfact[i] = (val & STM32H7_LINCALFACT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) adc->cal.lincalfact[i] >>= STM32H7_LINCALFACT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
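		/* Step down to the next shadow register: LINCALRDYW6 .. LINCALRDYW1 */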
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) lincalrdyw_mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Read offset calibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) adc->cal.calfact_s = (val & STM32H7_CALFACT_S_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) adc->cal.calfact_s >>= STM32H7_CALFACT_S_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) adc->cal.calfact_d = (val & STM32H7_CALFACT_D_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) adc->cal.calfact_d >>= STM32H7_CALFACT_D_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) adc->cal.calibrated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * stm32h7_adc_restore_selfcalib() - Restore saved self-calibration result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @indio_dev: IIO device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Note: ADC must be enabled, with no on-going conversions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static int stm32h7_adc_restore_selfcalib(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 lincalrdyw_mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
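	/* Restore the single-ended and differential offset factors first */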
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) val = (adc->cal.calfact_s << STM32H7_CALFACT_S_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) (adc->cal.calfact_d << STM32H7_CALFACT_D_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) stm32_adc_writel(adc, STM32H7_ADC_CALFACT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) lincalrdyw_mask = STM32H7_LINCALRDYW6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) for (i = STM32H7_LINCALFACT_NUM - 1; i >= 0; i--) {
		/*
		 * Write the saved calibration data to the shadow registers:
		 * write CALFACT2, set the LINCALRDYW[6..1] bit to trigger the
		 * data write, then poll until the transfer has completed.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) val = adc->cal.lincalfact[i] << STM32H7_LINCALFACT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) stm32_adc_writel(adc, STM32H7_ADC_CALFACT2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) stm32_adc_set_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) val & lincalrdyw_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 100, STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dev_err(&indio_dev->dev, "Failed to write calfact\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
		/*
		 * Read back the calibration data. This has two effects:
		 * - It ensures bits LINCALRDYW[6..1] are left cleared for
		 *   the next time the calibration needs to be restored.
		 * - Clearing a bit triggers a read-back, so the data just
		 *   written can be checked for consistency below.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) !(val & lincalrdyw_mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 100, STM32_ADC_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) dev_err(&indio_dev->dev, "Failed to read calfact\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (val != adc->cal.lincalfact[i] << STM32H7_LINCALFACT_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dev_err(&indio_dev->dev, "calfact not consistent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) lincalrdyw_mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
/*
 * Fixed timeout value for ADC calibration.
 * Worst-case assumptions:
 * - low clock frequency
 * - maximum prescalers
 * Calibration requires:
 * - 131,072 ADC clock cycles for the linear calibration
 * - 20 ADC clock cycles for the offset calibration
 *
 * Set to 100ms for now.
 */
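/*
 * Illustrative order of magnitude (not taken from the datasheet): at an ADC
 * clock of ~1.5 MHz, 131,072 cycles take roughly 87 ms, so 100 ms still
 * leaves some margin in the slow-clock, high-prescaler case.
 */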
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #define STM32H7_ADC_CALIB_TIMEOUT_US 100000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * stm32h7_adc_selfcalib() - Procedure to calibrate ADC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * @indio_dev: IIO device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Note: Must be called once ADC is out of power down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (adc->cal.calibrated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Select calibration mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * - Offset calibration for single ended inputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * - No linearity calibration (do it later, before reading it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALDIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALLIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* Start calibration, then wait for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) !(val & STM32H7_ADCAL), 100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) STM32H7_ADC_CALIB_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dev_err(&indio_dev->dev, "calibration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Select calibration mode, then start calibration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * - Offset calibration for differential input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * - Linearity calibration (needs to be done only once for single/diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * will run simultaneously with offset calibration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) stm32_adc_set_bits(adc, STM32H7_ADC_CR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) STM32H7_ADCALDIF | STM32H7_ADCALLIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) !(val & STM32H7_ADCAL), 100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) STM32H7_ADC_CALIB_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev_err(&indio_dev->dev, "calibration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) stm32_adc_clr_bits(adc, STM32H7_ADC_CR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) STM32H7_ADCALDIF | STM32H7_ADCALLIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * stm32h7_adc_prepare() - Leave power down mode to enable ADC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * @indio_dev: IIO device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Leave power down mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Configure channels as single ended or differential before enabling ADC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * Enable ADC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * Restore calibration data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Pre-select channels that may be used in PCSEL (required by input MUX / IO):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * - Only one input is selected for single ended (e.g. 'vinp')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * - Two inputs are selected for differential channels (e.g. 'vinp' & 'vinn')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int stm32h7_adc_prepare(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int calib, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ret = stm32h7_adc_exit_pwr_down(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ret = stm32h7_adc_selfcalib(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) goto pwr_dwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) calib = ret;
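	/*
	 * A positive value from stm32h7_adc_selfcalib() means calibration
	 * data is already cached in adc->cal and only needs to be restored
	 * once the ADC is enabled; zero means calibration has just run and
	 * its result must be read back below.
	 */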
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ret = stm32h7_adc_enable(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto pwr_dwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Either restore or read calibration result for future reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (calib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ret = stm32h7_adc_restore_selfcalib(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ret = stm32h7_adc_read_selfcalib(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) goto disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) stm32_adc_writel(adc, STM32H7_ADC_PCSEL, adc->pcsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) stm32h7_adc_disable(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) pwr_dwn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) stm32h7_adc_enter_pwr_down(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) stm32h7_adc_disable(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) stm32h7_adc_enter_pwr_down(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * stm32_adc_conf_scan_seq() - Build regular channels scan sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * @indio_dev: IIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * @scan_mask: channels to be converted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *
 * Conversion sequence:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Apply sampling time settings for all channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Configure ADC scan sequence based on selected channels in scan_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Add channels to SQR registers, from scan_mask LSB to MSB, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * program sequence len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) const unsigned long *scan_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) const struct stm32_adc_regs *sqr = adc->cfg->regs->sqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) const struct iio_chan_spec *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u32 val, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* Apply sampling time settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) stm32_adc_writel(adc, adc->cfg->regs->smpr[0], adc->smpr_val[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) stm32_adc_writel(adc, adc->cfg->regs->smpr[1], adc->smpr_val[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
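	/*
	 * Illustrative example (hypothetical channel set): if channels 2 and
	 * 5 are the only ones selected in scan_mask, the loop below programs
	 * SQ1 = 2 and SQ2 = 5, and the sequence length field is then written
	 * as i - 1 = 1, i.e. two conversions per sequence.
	 */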
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) for_each_set_bit(bit, scan_mask, indio_dev->masklength) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) chan = indio_dev->channels + bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Assign one channel per SQ entry in regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * sequence, starting with SQ1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (i > STM32_ADC_MAX_SQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dev_dbg(&indio_dev->dev, "%s chan %d to SQ%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) __func__, chan->channel, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) val = stm32_adc_readl(adc, sqr[i].reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) val &= ~sqr[i].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) val |= chan->channel << sqr[i].shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) stm32_adc_writel(adc, sqr[i].reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* Sequence len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) val = stm32_adc_readl(adc, sqr[0].reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) val &= ~sqr[0].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) val |= ((i - 1) << sqr[0].shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) stm32_adc_writel(adc, sqr[0].reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * stm32_adc_get_trig_extsel() - Get external trigger selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @indio_dev: IIO device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * @trig: trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) *
 * Returns the trigger extsel value if trig matches, -EINVAL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static int stm32_adc_get_trig_extsel(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct iio_trigger *trig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* lookup triggers registered by stm32 timer trigger driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) for (i = 0; adc->cfg->trigs[i].name; i++) {
		/*
		 * Checking both stm32 timer trigger type and trig name
		 * should be safe against arbitrary trigger names.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if ((is_stm32_timer_trigger(trig) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) is_stm32_lptim_trigger(trig)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) !strcmp(adc->cfg->trigs[i].name, trig->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return adc->cfg->trigs[i].extsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * stm32_adc_set_trig() - Set a regular trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * @indio_dev: IIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * @trig: IIO trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *
 * Set trigger source/polarity (e.g. SW, or HW with polarity):
 * - if the HW trigger is disabled (trig == NULL), conversions are launched
 *   by software
 * - if the HW trigger is enabled, set its source and polarity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static int stm32_adc_set_trig(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct iio_trigger *trig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) u32 val, extsel = 0, exten = STM32_EXTEN_SWTRIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (trig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ret = stm32_adc_get_trig_extsel(indio_dev, trig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* set trigger source and polarity (default to rising edge) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) extsel = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) exten = adc->trigger_polarity + STM32_EXTEN_HWTRIG_RISING_EDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) spin_lock_irqsave(&adc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) val = stm32_adc_readl(adc, adc->cfg->regs->exten.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) val &= ~(adc->cfg->regs->exten.mask | adc->cfg->regs->extsel.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) val |= exten << adc->cfg->regs->exten.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) val |= extsel << adc->cfg->regs->extsel.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) stm32_adc_writel(adc, adc->cfg->regs->exten.reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_unlock_irqrestore(&adc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int stm32_adc_set_trig_pol(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) const struct iio_chan_spec *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) adc->trigger_polarity = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static int stm32_adc_get_trig_pol(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) const struct iio_chan_spec *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return adc->trigger_polarity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static const char * const stm32_trig_pol_items[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) "rising-edge", "falling-edge", "both-edges",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) };
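/*
 * Note: stm32_adc_set_trig() relies on this ordering, as it adds the enum
 * index (trigger_polarity) to STM32_EXTEN_HWTRIG_RISING_EDGE to derive the
 * EXTEN value for rising, falling or both edges.
 */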
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static const struct iio_enum stm32_adc_trig_pol = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .items = stm32_trig_pol_items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .num_items = ARRAY_SIZE(stm32_trig_pol_items),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .get = stm32_adc_get_trig_pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .set = stm32_adc_set_trig_pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * stm32_adc_single_conv() - Performs a single conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @indio_dev: IIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * @chan: IIO channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * @res: conversion result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * The function performs a single conversion on a given channel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * - Apply sampling time settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * - Use SW trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * - Start conversion, then wait for interrupt completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int stm32_adc_single_conv(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) const struct iio_chan_spec *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) int *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct device *dev = indio_dev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) const struct stm32_adc_regspec *regs = adc->cfg->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) reinit_completion(&adc->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) adc->bufi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* Apply sampling time settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) stm32_adc_writel(adc, regs->smpr[0], adc->smpr_val[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) stm32_adc_writel(adc, regs->smpr[1], adc->smpr_val[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* Program chan number in regular sequence (SQ1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) val = stm32_adc_readl(adc, regs->sqr[1].reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) val &= ~regs->sqr[1].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) val |= chan->channel << regs->sqr[1].shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) stm32_adc_writel(adc, regs->sqr[1].reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* Set regular sequence len (0 for 1 conversion) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) stm32_adc_clr_bits(adc, regs->sqr[0].reg, regs->sqr[0].mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* Trigger detection disabled (conversion can be launched in SW) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) stm32_adc_clr_bits(adc, regs->exten.reg, regs->exten.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) stm32_adc_conv_irq_enable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) adc->cfg->start_conv(indio_dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) timeout = wait_for_completion_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) &adc->completion, STM32_ADC_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) } else if (timeout < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ret = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *res = adc->buffer[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ret = IIO_VAL_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) adc->cfg->stop_conv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) stm32_adc_conv_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int stm32_adc_read_raw(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct iio_chan_spec const *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int *val, int *val2, long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) switch (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case IIO_CHAN_INFO_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ret = iio_device_claim_direct_mode(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (chan->type == IIO_VOLTAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ret = stm32_adc_single_conv(indio_dev, chan, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) iio_device_release_direct_mode(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
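	/*
	 * Scale/offset example (illustrative figures, assuming vref_mv = 3300
	 * and a 16-bit differential channel): scale = 6600 / 2^16 mV per LSB
	 * and offset = -32768, so a raw value of 0 reads back as about
	 * -3300 mV and a full-scale raw value as about +3300 mV.
	 */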
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) case IIO_CHAN_INFO_SCALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (chan->differential) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) *val = adc->common->vref_mv * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) *val2 = chan->scan_type.realbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) *val = adc->common->vref_mv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *val2 = chan->scan_type.realbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return IIO_VAL_FRACTIONAL_LOG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) case IIO_CHAN_INFO_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (chan->differential)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* ADC_full_scale / 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *val = -((1 << chan->scan_type.realbits) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return IIO_VAL_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void stm32_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) adc->cfg->irq_clear(indio_dev, msk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct iio_dev *indio_dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) const struct stm32_adc_regspec *regs = adc->cfg->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
	/* Check ovr status right away, as the ovr interrupt should already be disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (status & regs->isr_ovr.mask) {
		/*
		 * Clear the OVR bit to avoid subsequent calls to the IRQ
		 * handler. This requires stopping the ADC first. The OVR bit
		 * state in ISR is propagated to the CSR register by hardware.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) adc->cfg->stop_conv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) stm32_adc_irq_clear(indio_dev, regs->isr_ovr.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (!(status & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) dev_err_ratelimited(&indio_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mask, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static irqreturn_t stm32_adc_isr(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct iio_dev *indio_dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) const struct stm32_adc_regspec *regs = adc->cfg->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!(status & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (status & regs->isr_ovr.mask) {
		/*
		 * Overrun occurred on regular conversions: data for the wrong
		 * channel may have been read. Unconditionally disable
		 * interrupts to stop processing data, and defer stopping the
		 * ADC and reporting the error to the threaded handler.
		 * Restarting the capture can be done by disabling, then
		 * re-enabling it (e.g. write 0, then 1 to buffer/enable).
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) stm32_adc_ovr_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) stm32_adc_conv_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (status & regs->isr_eoc.mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /* Reading DR also clears EOC status flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) adc->buffer[adc->bufi] = stm32_adc_readw(adc, regs->dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (iio_buffer_enabled(indio_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) adc->bufi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (adc->bufi >= adc->num_conv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) stm32_adc_conv_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) iio_trigger_poll(indio_dev->trig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) complete(&adc->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * stm32_adc_validate_trigger() - validate trigger for stm32 adc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * @indio_dev: IIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * @trig: new trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Returns: 0 if trig matches one of the triggers registered by stm32 adc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * driver, -EINVAL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static int stm32_adc_validate_trigger(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct iio_trigger *trig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return stm32_adc_get_trig_extsel(indio_dev, trig) < 0 ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
	/*
	 * DMA cyclic transfers are used: the buffer is split into two
	 * periods. There should always be:
	 * - one buffer (period) the DMA engine is working on
	 * - one buffer (period) from which the driver can push data to
	 *   the IIO buffer.
	 */
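	/*
	 * Worked example (illustrative, assuming STM32_DMA_BUFFER_SIZE
	 * resolves to 4096 bytes): for val = 64 samples and num_conv = 2,
	 * watermark = min(2048, 64 * 2) = 128 bytes and
	 * rx_buf_sz = min(4096, 128 * 2 * 2) = 512 bytes.
	 */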
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) watermark = min(watermark, val * (unsigned)(sizeof(u16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) const unsigned long *scan_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct device *dev = indio_dev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) adc->num_conv = bitmap_weight(scan_mask, indio_dev->masklength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) const struct of_phandle_args *iiospec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) for (i = 0; i < indio_dev->num_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (indio_dev->channels[i].channel == iiospec->args[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /**
 * stm32_adc_debugfs_reg_access() - read or write register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * @indio_dev: IIO device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * @reg: register offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * @writeval: value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * @readval: value to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * To read a value from an ADC register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * echo [ADC reg offset] > direct_reg_access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * cat direct_reg_access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) *
 * To write a value to an ADC register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * echo [ADC_reg_offset] [value] > direct_reg_access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) unsigned reg, unsigned writeval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) unsigned *readval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct device *dev = indio_dev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (!readval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) stm32_adc_writel(adc, reg, writeval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) *readval = stm32_adc_readl(adc, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static const struct iio_info stm32_adc_iio_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) .read_raw = stm32_adc_read_raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .validate_trigger = stm32_adc_validate_trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .hwfifo_set_watermark = stm32_adc_set_watermark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .update_scan_mode = stm32_adc_update_scan_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) .debugfs_reg_access = stm32_adc_debugfs_reg_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) .of_xlate = stm32_adc_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) status = dmaengine_tx_status(adc->dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) adc->dma_chan->cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (status == DMA_IN_PROGRESS) {
		/* Residue is the number of bytes left until the end of the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) unsigned int i = adc->rx_buf_sz - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* Return available bytes */
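		/*
		 * e.g. (illustrative) rx_buf_sz = 4096, DMA write position
		 * i = 512, driver read position bufi = 3584: the wrap-around
		 * branch yields 4096 + 512 - 3584 = 1024 bytes available.
		 */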
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (i >= adc->bufi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) size = i - adc->bufi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) size = adc->rx_buf_sz + i - adc->bufi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static void stm32_adc_dma_buffer_done(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct iio_dev *indio_dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int residue = stm32_adc_dma_residue(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
	/*
	 * In DMA mode the trigger services of IIO are not used
	 * (e.g. no call to iio_trigger_poll).
	 * Calling the IRQ handler associated with the hardware trigger is
	 * not relevant as the conversions have already been done. Data
	 * transfers are performed directly in the DMA callback instead.
	 * This avoids calling the trigger IRQ handler, which may sleep,
	 * from an atomic context (the DMA IRQ handler context).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) while (residue >= indio_dev->scan_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) iio_push_to_buffers(indio_dev, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) residue -= indio_dev->scan_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) adc->bufi += indio_dev->scan_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (adc->bufi >= adc->rx_buf_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) adc->bufi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static int stm32_adc_dma_start(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (!adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) adc->rx_buf_sz, adc->rx_buf_sz / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Prepare a DMA cyclic transaction */
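	/*
	 * The cyclic buffer is split into two periods of rx_buf_sz / 2 bytes:
	 * the buffer-done callback then fires once per half buffer, matching
	 * the watermark advertised in the debug message above.
	 */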
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) adc->rx_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) adc->rx_buf_sz, adc->rx_buf_sz / 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) desc->callback = stm32_adc_dma_buffer_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) desc->callback_param = indio_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) ret = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) dmaengine_terminate_sync(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* Issue pending DMA requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) dma_async_issue_pending(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct device *dev = indio_dev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) ret = stm32_adc_set_trig(indio_dev, indio_dev->trig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) dev_err(&indio_dev->dev, "Can't set trigger\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) goto err_pm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ret = stm32_adc_dma_start(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (ret) {
		dev_err(&indio_dev->dev, "Can't start DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) goto err_clr_trig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* Reset adc buffer index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) adc->bufi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) stm32_adc_ovr_irq_enable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (!adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) stm32_adc_conv_irq_enable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) adc->cfg->start_conv(indio_dev, !!adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) err_clr_trig:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) stm32_adc_set_trig(indio_dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) err_pm_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct device *dev = indio_dev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
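	/*
	 * Disable sequence mirrors stm32_adc_buffer_postenable() in reverse:
	 * stop conversions, mask interrupts, terminate DMA, release the
	 * trigger, then drop the runtime PM reference.
	 */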
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) adc->cfg->stop_conv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (!adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) stm32_adc_conv_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) stm32_adc_ovr_irq_disable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) dmaengine_terminate_sync(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (stm32_adc_set_trig(indio_dev, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) dev_err(&indio_dev->dev, "Can't clear trigger\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static const struct iio_buffer_setup_ops stm32_adc_buffer_setup_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) .postenable = &stm32_adc_buffer_postenable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) .predisable = &stm32_adc_buffer_predisable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static irqreturn_t stm32_adc_trigger_handler(int irq, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct iio_poll_func *pf = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct iio_dev *indio_dev = pf->indio_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (!adc->dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* reset buffer index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) adc->bufi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) iio_push_to_buffers_with_timestamp(indio_dev, adc->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) pf->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) int residue = stm32_adc_dma_residue(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
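		/*
		 * Push each complete scan made available by the DMA engine
		 * (as reported by stm32_adc_dma_residue()), wrapping the
		 * software index at the end of the cyclic buffer.
		 */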
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) while (residue >= indio_dev->scan_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) iio_push_to_buffers_with_timestamp(indio_dev, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) pf->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) residue -= indio_dev->scan_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) adc->bufi += indio_dev->scan_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (adc->bufi >= adc->rx_buf_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) adc->bufi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) iio_trigger_notify_done(indio_dev->trig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
	/* Re-enable end of conversion (EOC) interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (!adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) stm32_adc_conv_irq_enable(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
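/*
 * Extended channel attributes: "trigger_polarity" and its "_available" list
 * are shared by all channels, so the IIO core exposes a single pair of sysfs
 * attributes for the whole device.
 */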
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) IIO_ENUM("trigger_polarity", IIO_SHARED_BY_ALL, &stm32_adc_trig_pol),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) .name = "trigger_polarity_available",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) .shared = IIO_SHARED_BY_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) .read = iio_enum_available_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) .private = (uintptr_t)&stm32_adc_trig_pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
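/*
 * Resolution is taken from the optional "assigned-resolution-bits" DT
 * property and checked against the per-variant resolution list; when the
 * property is absent, the first entry of that list is used. Illustrative
 * fragment (node name and value are examples only, supported values depend
 * on the ADC variant):
 *
 *	adc1: adc@0 {
 *		assigned-resolution-bits = <12>;
 *	};
 */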
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct device_node *node = indio_dev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u32 res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (of_property_read_u32(node, "assigned-resolution-bits", &res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) res = adc->cfg->adc_info->resolutions[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) for (i = 0; i < adc->cfg->adc_info->num_res; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (res == adc->cfg->adc_info->resolutions[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (i >= adc->cfg->adc_info->num_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) dev_err(&indio_dev->dev, "Bad resolution: %u bits\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
	dev_dbg(&indio_dev->dev, "Using %u-bit resolution\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) adc->res = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
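/*
 * Example (illustrative numbers): with a 30 MHz ADC clock, period_ns is
 * roughly 33 ns; requesting a 1000 ns minimum sample time selects the
 * smallest smp index whose smp_cycles[smp] * 33 ns is >= 1000 ns. If no
 * setting is long enough, the largest one (STM32_ADC_MAX_SMP) is used.
 */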
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) const struct stm32_adc_regs *smpr = &adc->cfg->regs->smp_bits[channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) u32 period_ns, shift = smpr->shift, mask = smpr->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) unsigned int smp, r = smpr->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /* Determine sampling time (ADC clock cycles) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) period_ns = NSEC_PER_SEC / adc->common->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) for (smp = 0; smp <= STM32_ADC_MAX_SMP; smp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if ((period_ns * adc->cfg->smp_cycles[smp]) >= smp_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (smp > STM32_ADC_MAX_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) smp = STM32_ADC_MAX_SMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /* pre-build sampling time registers (e.g. smpr1, smpr2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) adc->smpr_val[r] = (adc->smpr_val[r] & ~mask) | (smp << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct iio_chan_spec *chan, u32 vinp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) u32 vinn, int scan_index, bool differential)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) char *name = adc->chan_name[vinp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) chan->type = IIO_VOLTAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) chan->channel = vinp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (differential) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) chan->differential = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) chan->channel2 = vinn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) snprintf(name, STM32_ADC_CH_SZ, "in%d-in%d", vinp, vinn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) snprintf(name, STM32_ADC_CH_SZ, "in%d", vinp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) chan->datasheet_name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) chan->scan_index = scan_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) chan->indexed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) BIT(IIO_CHAN_INFO_OFFSET);
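	/*
	 * Raw samples are unsigned; realbits follows the selected resolution
	 * while each sample still occupies a 16-bit storage word, which also
	 * sets the per-channel element size used by the buffer/DMA paths.
	 */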
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) chan->scan_type.sign = 'u';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) chan->scan_type.storagebits = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) chan->ext_info = stm32_adc_ext_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* pre-build selected channels mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) adc->pcsel |= BIT(chan->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (differential) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /* pre-build diff channels mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) adc->difsel |= BIT(chan->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /* Also add negative input to pre-selected channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) adc->pcsel |= BIT(chan->channel2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
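/*
 * Channels are described in the device tree. Illustrative fragment (node
 * name and cell values are examples only):
 *
 *	adc1: adc@0 {
 *		st,adc-channels = <0 1 4>;
 *		st,adc-diff-channels = <2 3>;
 *		st,min-sample-time-nsecs = <5000>;
 *	};
 *
 * "st,adc-diff-channels" lists <vinp vinn> pairs, and the optional
 * "st,min-sample-time-nsecs" holds either one value per channel or a single
 * value shared by all channels.
 */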
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct device_node *node = indio_dev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) const __be32 *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct iio_chan_spec *channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) int scan_index = 0, num_channels = 0, num_diff = 0, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) u32 val, smp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) ret = of_property_count_u32_elems(node, "st,adc-channels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (ret > adc_info->max_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) } else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) num_channels += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) sizeof(*diff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (ret > adc_info->max_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) } else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) int size = ret * sizeof(*diff) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) num_diff = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) num_channels += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ret = of_property_read_u32_array(node, "st,adc-diff-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) (u32 *)diff, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) dev_err(&indio_dev->dev, "No channels configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
	/*
	 * Optional minimum sample time: either one value per channel,
	 * or a single value shared by all channels.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) ret = of_property_count_u32_elems(node, "st,min-sample-time-nsecs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (ret > 1 && ret != num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dev_err(&indio_dev->dev, "Invalid st,min-sample-time-nsecs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) channels = devm_kcalloc(&indio_dev->dev, num_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) sizeof(struct iio_chan_spec), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (!channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (val >= adc_info->max_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Channel can't be configured both as single-ended & diff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) for (i = 0; i < num_diff; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (val == diff[i].vinp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) dev_err(&indio_dev->dev,
					"channel %d misconfigured\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 0, scan_index, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) scan_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) for (i = 0; i < num_diff; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (diff[i].vinp >= adc_info->max_channels ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) diff[i].vinn >= adc_info->max_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) diff[i].vinp, diff[i].vinn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) diff[i].vinp, diff[i].vinn, scan_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) scan_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) for (i = 0; i < scan_index; i++) {
		/*
		 * With of_property_read_u32_index(), smp is only updated when
		 * a valid u32 value can be decoded at that index. This allows
		 * either no value, one shared value for all channels, or one
		 * value per channel.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) of_property_read_u32_index(node, "st,min-sample-time-nsecs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) i, &smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) /* Prepare sampling time settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) stm32_adc_smpr_init(adc, channels[i].channel, smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) indio_dev->num_channels = scan_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) indio_dev->channels = channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct dma_slave_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) adc->dma_chan = dma_request_chan(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (IS_ERR(adc->dma_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ret = PTR_ERR(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (ret != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return dev_err_probe(dev, ret,
					     "DMA channel request failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* DMA is optional: fall back to IRQ mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) adc->dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) STM32_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) &adc->rx_dma_buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!adc->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /* Configure DMA channel to read data register */
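	/*
	 * src_addr targets this instance's data register: common physical
	 * base + instance offset + DR, read 16 bits at a time to match the
	 * 16-bit sample storage.
	 */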
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) config.src_addr = (dma_addr_t)adc->common->phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) config.src_addr += adc->offset + adc->cfg->regs->dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) ret = dmaengine_slave_config(adc->dma_chan, &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dma_free_coherent(adc->dma_chan->device->dev, STM32_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) adc->rx_buf, adc->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) err_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) dma_release_channel(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) static int stm32_adc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct iio_dev *indio_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) irqreturn_t (*handler)(int irq, void *p) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct stm32_adc *adc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (!pdev->dev.of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (!indio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) adc->common = dev_get_drvdata(pdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) spin_lock_init(&adc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) init_completion(&adc->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) adc->cfg = (const struct stm32_adc_cfg *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) of_match_device(dev->driver->of_match_table, dev)->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) indio_dev->name = dev_name(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) indio_dev->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) indio_dev->info = &stm32_adc_iio_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) platform_set_drvdata(pdev, indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) dev_err(&pdev->dev, "missing reg property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) adc->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (adc->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return adc->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = devm_request_threaded_irq(&pdev->dev, adc->irq, stm32_adc_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) stm32_adc_threaded_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 0, pdev->name, indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) dev_err(&pdev->dev, "failed to request IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) adc->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (IS_ERR(adc->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) ret = PTR_ERR(adc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (ret == -ENOENT && !adc->cfg->clk_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) adc->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) dev_err(&pdev->dev, "Can't get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) ret = stm32_adc_of_get_resolution(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ret = stm32_adc_chan_of_init(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) ret = stm32_adc_dma_request(dev, indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
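	/*
	 * The IIO trigger handler is only registered in interrupt mode. When
	 * a DMA channel is available, samples are expected to be pushed to
	 * the IIO buffer from the DMA completion path instead.
	 */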
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (!adc->dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) handler = &stm32_adc_trigger_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) ret = iio_triggered_buffer_setup(indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) &iio_pollfunc_store_time, handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) &stm32_adc_buffer_setup_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) dev_err(&pdev->dev, "buffer setup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) goto err_dma_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Get stm32-adc-core PM online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) pm_runtime_get_noresume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) pm_runtime_set_active(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) pm_runtime_set_autosuspend_delay(dev, STM32_ADC_HW_STOP_DELAY_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) pm_runtime_use_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) ret = stm32_adc_hw_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) goto err_buffer_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) ret = iio_device_register(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) dev_err(&pdev->dev, "iio dev register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) goto err_hw_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) pm_runtime_put_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) err_hw_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) stm32_adc_hw_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) err_buffer_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) pm_runtime_set_suspended(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) iio_triggered_buffer_cleanup(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) err_dma_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (adc->dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) dma_free_coherent(adc->dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) STM32_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) adc->rx_buf, adc->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) dma_release_channel(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) static int stm32_adc_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct iio_dev *indio_dev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct stm32_adc *adc = iio_priv(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) iio_device_unregister(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) stm32_adc_hw_stop(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) iio_triggered_buffer_cleanup(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (adc->dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) dma_free_coherent(adc->dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) STM32_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) adc->rx_buf, adc->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) dma_release_channel(adc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) #if defined(CONFIG_PM_SLEEP)
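/*
 * System sleep: if a buffer was enabled, conversions are stopped before the
 * ADC is forced into runtime suspend; on resume the scan mode is restored
 * and the buffer is re-enabled so capture continues transparently.
 */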
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static int stm32_adc_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct iio_dev *indio_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (iio_buffer_enabled(indio_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) stm32_adc_buffer_predisable(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static int stm32_adc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct iio_dev *indio_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) ret = pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!iio_buffer_enabled(indio_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ret = stm32_adc_update_scan_mode(indio_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) indio_dev->active_scan_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return stm32_adc_buffer_postenable(indio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) #if defined(CONFIG_PM)
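/*
 * Runtime PM simply powers the ADC down/up around periods of inactivity,
 * using the same hw_stop()/hw_start() helpers as probe and remove.
 */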
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static int stm32_adc_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return stm32_adc_hw_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static int stm32_adc_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return stm32_adc_hw_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static const struct dev_pm_ops stm32_adc_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) SET_SYSTEM_SLEEP_PM_OPS(stm32_adc_suspend, stm32_adc_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) SET_RUNTIME_PM_OPS(stm32_adc_runtime_suspend, stm32_adc_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static const struct stm32_adc_cfg stm32f4_adc_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) .regs = &stm32f4_adc_regspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) .adc_info = &stm32f4_adc_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) .trigs = stm32f4_adc_trigs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) .clk_required = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) .start_conv = stm32f4_adc_start_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) .stop_conv = stm32f4_adc_stop_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) .smp_cycles = stm32f4_adc_smp_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) .irq_clear = stm32f4_adc_irq_clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static const struct stm32_adc_cfg stm32h7_adc_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) .regs = &stm32h7_adc_regspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) .adc_info = &stm32h7_adc_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) .trigs = stm32h7_adc_trigs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) .start_conv = stm32h7_adc_start_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) .stop_conv = stm32h7_adc_stop_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) .prepare = stm32h7_adc_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) .unprepare = stm32h7_adc_unprepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) .smp_cycles = stm32h7_adc_smp_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) .irq_clear = stm32h7_adc_irq_clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) .regs = &stm32h7_adc_regspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) .adc_info = &stm32h7_adc_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) .trigs = stm32h7_adc_trigs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) .has_vregready = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) .start_conv = stm32h7_adc_start_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) .stop_conv = stm32h7_adc_stop_conv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) .prepare = stm32h7_adc_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) .unprepare = stm32h7_adc_unprepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) .smp_cycles = stm32h7_adc_smp_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) .irq_clear = stm32h7_adc_irq_clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) static const struct of_device_id stm32_adc_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) { .compatible = "st,stm32f4-adc", .data = (void *)&stm32f4_adc_cfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) { .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) { .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) static struct platform_driver stm32_adc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) .probe = stm32_adc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) .remove = stm32_adc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) .name = "stm32-adc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) .of_match_table = stm32_adc_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) .pm = &stm32_adc_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) module_platform_driver(stm32_adc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) MODULE_ALIAS("platform:stm32-adc");