^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Intel Management Engine Interface (Intel MEI) Linux driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "mei_dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "hbm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "hw-me.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "hw-me-regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "mei-trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * mei_me_reg_read - Reads 32bit data from the mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * @hw: the me hardware structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * @offset: offset from which to read the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * Return: register value (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) return ioread32(hw->mem_addr + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * mei_me_reg_write - Writes 32bit data to the mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * @hw: the me hardware structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * @offset: offset from which to write the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * @value: register value to write (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static inline void mei_me_reg_write(const struct mei_me_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) unsigned long offset, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) iowrite32(value, hw->mem_addr + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * read window register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * Return: ME_CB_RW register value (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * mei_me_hcbww_write - write 32bit data to the host circular buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * @data: 32bit data to be written to the host circular buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * mei_me_mecsr_read - Reads 32bit data from the ME CSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Return: ME_CSR_HA register value (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * mei_hcsr_read - Reads 32bit data from the host CSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * Return: H_CSR register value (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) static inline u32 mei_hcsr_read(const struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * mei_hcsr_write - writes H_CSR register to the mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * @reg: new register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * mei_hcsr_set - writes H_CSR register to the mei device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * and ignores the H_IS bit for it is write-one-to-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * @reg: new register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) reg &= ~H_CSR_IS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) mei_hcsr_write(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * mei_hcsr_set_hig - set host interrupt (set H_IG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static inline void mei_hcsr_set_hig(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u32 hcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) hcsr = mei_hcsr_read(dev) | H_IG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * Return: H_D0I3C register value (u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * mei_me_d0i3c_write - writes H_D0I3C register to device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * @reg: new register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * mei_me_trc_status - read trc status register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * @trc: trc status register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * Return: 0 on success, error otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (!hw->cfg->hw_trc_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) *trc = mei_me_reg_read(hw, ME_TRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) * mei_me_fw_status - read fw status register from pci config space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * @fw_status: fw status register values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * Return: 0 on success, error otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static int mei_me_fw_status(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct mei_fw_status *fw_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (!fw_status || !hw->read_fws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) fw_status->count = fw_src->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ret = hw->read_fws(dev, fw_src->status[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) &fw_status->status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) fw_src->status[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) fw_status->status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * mei_me_hw_config - configure hw dependent settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * * -EINVAL when read_fws is not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static int mei_me_hw_config(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u32 hcsr, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (WARN_ON(!hw->read_fws))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) /* Doesn't change in runtime */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) hw->hbuf_depth = (hcsr & H_CBD) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) hw->read_fws(dev, PCI_CFG_HFS_1, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) hw->d0i3_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) hw->pg_state = MEI_PG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (hw->d0i3_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (reg & H_D0I3C_I3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) hw->pg_state = MEI_PG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * mei_me_pg_state - translate internal pg state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * to the mei power gating state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return hw->pg_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static inline u32 me_intr_src(u32 hcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return hcsr & H_CSR_IS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * me_intr_disable - disables mei device interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * using supplied hcsr register value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * @hcsr: supplied hcsr register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) hcsr &= ~H_CSR_IE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * me_intr_clear - clear and stop interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * @hcsr: supplied hcsr register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (me_intr_src(hcsr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) mei_hcsr_write(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	me_intr_clear(dev, mei_hcsr_read(dev));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * mei_me_intr_enable - enables mei device interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static void mei_me_intr_enable(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) u32 hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) hcsr |= H_CSR_IE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	me_intr_disable(dev, mei_hcsr_read(dev));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * mei_me_synchronize_irq - wait for pending IRQ handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static void mei_me_synchronize_irq(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) synchronize_irq(hw->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * mei_me_hw_reset_release - release device from the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void mei_me_hw_reset_release(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) u32 hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) hcsr |= H_IG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) hcsr &= ~H_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * mei_me_host_set_ready - enable device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) static void mei_me_host_set_ready(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) u32 hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * mei_me_host_is_ready - check whether the host has turned ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * Return: bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) static bool mei_me_host_is_ready(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) u32 hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) return (hcsr & H_RDY) == H_RDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * mei_me_hw_is_ready - check whether the me(hw) has turned ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * Return: bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) static bool mei_me_hw_is_ready(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) u32 mecsr = mei_me_mecsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * mei_me_hw_is_resetting - check whether the me(hw) is in reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * Return: bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static bool mei_me_hw_is_resetting(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) u32 mecsr = mei_me_mecsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) return (mecsr & ME_RST_HRA) == ME_RST_HRA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Sleeps until the interrupt path sets dev->recvd_hw_ready, then
 * releases the device from reset and consumes the ready flag.
 * Called with dev->device_lock held; the lock is dropped for the
 * duration of the wait and re-taken before the flag is examined.
 *
 * Return: 0 on success, -ETIME if the device did not become ready
 *         within MEI_HW_READY_TIMEOUT
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the lock so the interrupt/bh path can set recvd_hw_ready */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	/* re-check the flag under the lock: wait_event_timeout may have
	 * returned because of the timeout rather than the condition
	 */
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	/* consume the flag for the next reset cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * mei_me_hw_start - hw start routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * Return: 0 on success, error otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) static int mei_me_hw_start(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) int ret = mei_me_hw_ready_wait(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) dev_dbg(dev->dev, "hw is ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) mei_me_host_set_ready(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * mei_hbuf_filled_slots - gets number of device filled buffer slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * Return: number of filled slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) u32 hcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) char read_ptr, write_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) read_ptr = (char) ((hcsr & H_CBRP) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) write_ptr = (char) ((hcsr & H_CBWP) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return (unsigned char) (write_ptr - read_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * mei_me_hbuf_is_empty - checks if host buffer is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * Return: true if empty, false - otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static bool mei_me_hbuf_is_empty(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) return mei_hbuf_filled_slots(dev) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * mei_me_hbuf_empty_slots - counts write empty slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * Return: -EOVERFLOW if overflow, otherwise empty slots count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int mei_me_hbuf_empty_slots(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) unsigned char filled_slots, empty_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) filled_slots = mei_hbuf_filled_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) empty_slots = hw->hbuf_depth - filled_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* check for overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (filled_slots > hw->hbuf_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) return empty_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * mei_me_hbuf_depth - returns depth of the hw buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * Return: size of hw buffer in slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static u32 mei_me_hbuf_depth(const struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return hw->hbuf_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * mei_me_hbuf_write - writes a message to host hw buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * @hdr: header of message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * @data: payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * @data_len: payload length in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * Return: 0 if success, < 0 - otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static int mei_me_hbuf_write(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) const void *hdr, size_t hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) const void *data, size_t data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) unsigned long rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) const u32 *reg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) u32 dw_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) int empty_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) if (WARN_ON(!hdr || !data || hdr_len & 0x3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) empty_slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (empty_slots < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dw_cnt = mei_data2slots(hdr_len + data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (dw_cnt > (u32)empty_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) reg_buf = hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) mei_me_hcbww_write(dev, reg_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) reg_buf = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) mei_me_hcbww_write(dev, reg_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) rem = data_len & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (rem > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) u32 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) memcpy(®, (const u8 *)data + data_len - rem, rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) mei_me_hcbww_write(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) mei_hcsr_set_hig(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (!mei_me_hw_is_ready(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * mei_me_count_full_read_slots - counts read full slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * Return: -EOVERFLOW if overflow, otherwise filled slots count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static int mei_me_count_full_read_slots(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) u32 me_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) char read_ptr, write_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) unsigned char buffer_depth, filled_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) me_csr = mei_me_mecsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) filled_slots = (unsigned char) (write_ptr - read_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* check for overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (filled_slots > buffer_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return (int)filled_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * mei_me_read_slots - reads a message from mei device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * @buffer: message buffer will be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * @buffer_length: message size will be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * Return: always 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) unsigned long buffer_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) u32 *reg_buf = (u32 *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) *reg_buf++ = mei_me_mecbrw_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (buffer_length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) u32 reg = mei_me_mecbrw_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) memcpy(reg_buf, ®, buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) mei_hcsr_set_hig(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * mei_me_pg_set - write pg enter register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static void mei_me_pg_set(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) reg = mei_me_reg_read(hw, H_HPG_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) reg |= H_HPG_CSR_PGI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) mei_me_reg_write(hw, H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * mei_me_pg_unset - write pg exit register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) static void mei_me_pg_unset(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) reg = mei_me_reg_read(hw, H_HPG_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) reg |= H_HPG_CSR_PGIHEXR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) mei_me_reg_write(hw, H_HPG_CSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) dev->pg_event = MEI_PG_EVENT_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) mei_me_pg_set(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) dev->pg_event = MEI_PG_EVENT_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) hw->pg_state = MEI_PG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) goto reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) dev->pg_event = MEI_PG_EVENT_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) mei_me_pg_unset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) reply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dev->pg_event = MEI_PG_EVENT_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) hw->pg_state = MEI_PG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * mei_me_pg_in_transition - is device now in pg transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * Return: true if in pg transition, false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static bool mei_me_pg_in_transition(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return dev->pg_event >= MEI_PG_EVENT_WAIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * mei_me_pg_is_enabled - detect if PG is supported by HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Return: true is pg supported, false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static bool mei_me_pg_is_enabled(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) u32 reg = mei_me_mecsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (hw->d0i3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if ((reg & ME_PGIC_HRA) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) goto notsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!dev->hbm_f_pg_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) goto notsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) notsupported:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) hw->d0i3_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) !!(reg & ME_PGIC_HRA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) dev->version.major_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) dev->version.minor_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) HBM_MAJOR_VERSION_PGI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) HBM_MINOR_VERSION_PGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * mei_me_d0i3_set - write d0i3 register bit on mei device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * @intr: ask for interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * Return: D0I3C register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) reg |= H_D0I3C_I3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) reg |= H_D0I3C_IR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) reg &= ~H_D0I3C_IR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) mei_me_d0i3c_write(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* read it to ensure HW consistency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Return: D0I3C register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static u32 mei_me_d0i3_unset(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) u32 reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) reg &= ~H_D0I3C_I3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) reg |= H_D0I3C_IR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) mei_me_d0i3c_write(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* read it to ensure HW consistency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static int mei_me_d0i3_enter_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (reg & H_D0I3C_I3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* we are in d0i3, nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dev_dbg(dev->dev, "d0i3 set not needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) goto on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* PGI entry procedure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dev->pg_event = MEI_PG_EVENT_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* FIXME: should we reset here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* end PGI entry procedure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) reg = mei_me_d0i3_set(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!(reg & H_D0I3C_CIP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) goto on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!(reg & H_D0I3C_I3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) hw->pg_state = MEI_PG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev->pg_event = MEI_PG_EVENT_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * mei_me_d0i3_enter - perform d0i3 entry procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * no hbm PG handshake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * no waiting for confirmation; runs with interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static int mei_me_d0i3_enter(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (reg & H_D0I3C_I3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* we are in d0i3, nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dev_dbg(dev->dev, "already d0i3 : set not needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) goto on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) mei_me_d0i3_set(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) hw->pg_state = MEI_PG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev->pg_event = MEI_PG_EVENT_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) dev_dbg(dev->dev, "d0i3 enter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int mei_me_d0i3_exit_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!(reg & H_D0I3C_I3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* we are not in d0i3, nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev_dbg(dev->dev, "d0i3 exit not needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) reg = mei_me_d0i3_unset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (!(reg & H_D0I3C_CIP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) wait_event_timeout(dev->wait_pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) reg = mei_me_d0i3c_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (reg & H_D0I3C_I3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) hw->pg_state = MEI_PG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dev->pg_event = MEI_PG_EVENT_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * mei_me_pg_legacy_intr - perform legacy pg processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * in interrupt thread handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static void mei_me_pg_legacy_intr(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) hw->pg_state = MEI_PG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (waitqueue_active(&dev->wait_pg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) wake_up(&dev->wait_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * @intr_source: interrupt source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) (intr_source & H_D0I3C_IS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (hw->pg_state == MEI_PG_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) hw->pg_state = MEI_PG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (dev->hbm_state != MEI_HBM_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * force H_RDY because it could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * wiped off during PG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev_dbg(dev->dev, "d0i3 set host ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) mei_me_host_set_ready(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) hw->pg_state = MEI_PG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) wake_up(&dev->wait_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * HW sent some data and we are in D0i3, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * we got here because of HW initiated exit from D0i3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * Start runtime pm resume sequence to exit low power state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dev_dbg(dev->dev, "d0i3 want resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) mei_hbm_pg_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * mei_me_pg_intr - perform pg processing in interrupt thread handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * @intr_source: interrupt source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (hw->d0i3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) mei_me_d0i3_intr(dev, intr_source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mei_me_pg_legacy_intr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * mei_me_pg_enter_sync - perform runtime pm entry procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int mei_me_pg_enter_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (hw->d0i3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return mei_me_d0i3_enter_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return mei_me_pg_legacy_enter_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * mei_me_pg_exit_sync - perform runtime pm exit procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int mei_me_pg_exit_sync(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (hw->d0i3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return mei_me_d0i3_exit_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return mei_me_pg_legacy_exit_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * mei_me_hw_reset - resets fw via mei csr register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @intr_enable: if interrupt should be enabled after reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Return: 0 on success an error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct mei_me_hw *hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) u32 hcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (intr_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) mei_me_intr_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (hw->d0i3_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ret = mei_me_d0i3_exit_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) pm_runtime_set_active(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* H_RST may be found lit before reset is started,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * for example if preceding reset flow hasn't completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * In that case asserting H_RST will be ignored, therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * we need to clean H_RST bit to start a successful reset sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if ((hcsr & H_RST) == H_RST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) hcsr &= ~H_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) mei_hcsr_set(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!intr_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) hcsr &= ~H_CSR_IE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dev->recvd_hw_ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) mei_hcsr_write(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Host reads the H_CSR once to ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * posted write to H_CSR completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if ((hcsr & H_RST) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if ((hcsr & H_RDY) == H_RDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (!intr_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) mei_me_hw_reset_release(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (hw->d0i3_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = mei_me_d0i3_enter(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * mei_me_irq_quick_handler - The ISR of the MEI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * @irq: The irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * @dev_id: pointer to the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Return: irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct mei_device *dev = (struct mei_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) u32 hcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!me_intr_src(hcsr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* disable interrupts on device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) me_intr_disable(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * @irq: The irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * @dev_id: pointer to the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * Return: irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) struct mei_device *dev = (struct mei_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct list_head cmpl_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) s32 slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) u32 hcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int rets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* initialize our complete list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) hcsr = mei_hcsr_read(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) me_intr_clear(dev, hcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) INIT_LIST_HEAD(&cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* check if ME wants a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dev_warn(dev->dev, "FW not ready: resetting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) schedule_work(&dev->reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (mei_me_hw_is_resetting(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) mei_hcsr_set_hig(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mei_me_pg_intr(dev, me_intr_src(hcsr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /* check if we need to start the dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!mei_host_is_ready(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (mei_hw_is_ready(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dev_dbg(dev->dev, "we need to start the dev.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dev->recvd_hw_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) wake_up(&dev->wait_hw_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev_dbg(dev->dev, "Spurious Interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* check slots available for reading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) slots = mei_count_full_read_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) while (slots > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dev_dbg(dev->dev, "slots to read = %08x\n", slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* There is a race between ME write and interrupt delivery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * Not all data is always available immediately after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * interrupt, so try to read again on the next interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (rets == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (rets &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) (dev->dev_state != MEI_DEV_RESETTING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) dev->dev_state != MEI_DEV_POWER_DOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) schedule_work(&dev->reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * During PG handshake only allowed write is the replay to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * PG exit message, so block calling write function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * if the pg event is in PG handshake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (dev->pg_event != MEI_PG_EVENT_WAIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dev->pg_event != MEI_PG_EVENT_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) rets = mei_irq_write_handler(dev, &cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) mei_irq_compl_handler(dev, &cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) mei_me_intr_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static const struct mei_hw_ops mei_me_hw_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .trc_status = mei_me_trc_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .fw_status = mei_me_fw_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) .pg_state = mei_me_pg_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) .host_is_ready = mei_me_host_is_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .hw_is_ready = mei_me_hw_is_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .hw_reset = mei_me_hw_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .hw_config = mei_me_hw_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .hw_start = mei_me_hw_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .pg_in_transition = mei_me_pg_in_transition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) .pg_is_enabled = mei_me_pg_is_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) .intr_clear = mei_me_intr_clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) .intr_enable = mei_me_intr_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .intr_disable = mei_me_intr_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .synchronize_irq = mei_me_synchronize_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .hbuf_free_slots = mei_me_hbuf_empty_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .hbuf_is_ready = mei_me_hbuf_is_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .hbuf_depth = mei_me_hbuf_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .write = mei_me_hbuf_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .rdbuf_full_slots = mei_me_count_full_read_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .read_hdr = mei_me_mecbrw_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .read = mei_me_read_slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * mei_me_fw_type_nm() - check for nm sku
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * Read ME FW Status register to check for the Node Manager (NM) Firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * The NM FW is only signaled in PCI function 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * __Note__: Deprecated by PCH8 and newer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * @pdev: pci device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * Return: true in case of NM firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) unsigned int devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return (reg & 0x600) == 0x200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) #define MEI_CFG_FW_NM \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .quirk_probe = mei_me_fw_type_nm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * mei_me_fw_sku_sps_4() - check for sps 4.0 sku
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * Read ME FW Status register to check for SPS Firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * The SPS FW is only signaled in the PCI function 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * __Note__: Deprecated by SPS 5.0 and newer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * @pdev: pci device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * Return: true in case of SPS firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) unsigned int devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) #define MEI_CFG_FW_SPS_4 \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .quirk_probe = mei_me_fw_type_sps_4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * mei_me_fw_sku_sps() - check for sps sku
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * Read ME FW Status register to check for SPS Firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * The SPS FW is only signaled in pci function 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * @pdev: pci device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * Return: true in case of SPS firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) u32 fw_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) unsigned int devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) #define MEI_CFG_KIND_ITOUCH \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) .kind = "itouch"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) #define MEI_CFG_FW_SPS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) .quirk_probe = mei_me_fw_type_sps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) #define MEI_CFG_FW_VER_SUPP \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .fw_ver_supported = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) #define MEI_CFG_ICH_HFS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) .fw_status.count = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) #define MEI_CFG_ICH10_HFS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) .fw_status.count = 1, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) .fw_status.status[0] = PCI_CFG_HFS_1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) #define MEI_CFG_PCH_HFS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) .fw_status.count = 2, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) .fw_status.status[0] = PCI_CFG_HFS_1, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) .fw_status.status[1] = PCI_CFG_HFS_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) #define MEI_CFG_PCH8_HFS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) .fw_status.count = 6, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) .fw_status.status[0] = PCI_CFG_HFS_1, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .fw_status.status[1] = PCI_CFG_HFS_2, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .fw_status.status[2] = PCI_CFG_HFS_3, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .fw_status.status[3] = PCI_CFG_HFS_4, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .fw_status.status[4] = PCI_CFG_HFS_5, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) .fw_status.status[5] = PCI_CFG_HFS_6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) #define MEI_CFG_DMA_128 \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) .dma_size[DMA_DSCR_HOST] = SZ_128K, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) #define MEI_CFG_TRC \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) .hw_trc_supported = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* ICH Legacy devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static const struct mei_cfg mei_me_ich_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) MEI_CFG_ICH_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* ICH devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static const struct mei_cfg mei_me_ich10_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) MEI_CFG_ICH10_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* PCH6 devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static const struct mei_cfg mei_me_pch6_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) MEI_CFG_PCH_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* PCH7 devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static const struct mei_cfg mei_me_pch7_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) MEI_CFG_PCH_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) MEI_CFG_PCH_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) MEI_CFG_FW_NM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* PCH8 Lynx Point and newer devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static const struct mei_cfg mei_me_pch8_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* PCH8 Lynx Point and newer devices - iTouch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static const struct mei_cfg mei_me_pch8_itouch_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) MEI_CFG_KIND_ITOUCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) MEI_CFG_FW_SPS_4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /* LBG with quirk for SPS (4.0) Firmware exclusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) MEI_CFG_FW_SPS_4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /* Cannon Lake and newer devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static const struct mei_cfg mei_me_pch12_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) MEI_CFG_DMA_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static const struct mei_cfg mei_me_pch12_sps_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) MEI_CFG_DMA_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) MEI_CFG_FW_SPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * w/o DMA support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) MEI_CFG_KIND_ITOUCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) MEI_CFG_FW_SPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* Tiger Lake and newer devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static const struct mei_cfg mei_me_pch15_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) MEI_CFG_DMA_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) MEI_CFG_TRC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static const struct mei_cfg mei_me_pch15_sps_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) MEI_CFG_PCH8_HFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) MEI_CFG_FW_VER_SUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) MEI_CFG_DMA_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) MEI_CFG_TRC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) MEI_CFG_FW_SPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * mei_cfg_list - A list of platform platform specific configurations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * Note: has to be synchronized with enum mei_cfg_idx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static const struct mei_cfg *const mei_cfg_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) [MEI_ME_UNDEF_CFG] = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (idx >= MEI_ME_NUM_CFG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return mei_cfg_list[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * mei_me_dev_init - allocates and initializes the mei device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * @parent: device associated with physical device (pci/platform)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * @cfg: per device generation config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * Return: The mei_device pointer on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct mei_device *mei_me_dev_init(struct device *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) const struct mei_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct mei_me_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) hw = to_me_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) for (i = 0; i < DMA_DSCR_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) dev->dr_dscr[i].size = cfg->dma_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) mei_device_init(dev, parent, &mei_me_hw_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) hw->cfg = cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dev->kind = cfg->kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)