// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Intel Corporation.
 * Lei Chuanhua <Chuanhua.lei@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>

#define RCU_RST_STAT	0x0024
#define RCU_RST_REQ	0x0048

#define REG_OFFSET_MASK		GENMASK(31, 16)
#define BIT_OFFSET_MASK		GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK	GENMASK(7, 0)
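
/*
 * A reset ID packs everything needed to address one reset line:
 * bits 31:16 hold the control register offset, bits 15:8 the request
 * bit position and, on legacy SoCs only, bits 7:0 the separate status
 * bit position (see intel_reset_xlate()).
 */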

#define to_reset_data(x)	container_of(x, struct intel_reset_data, rcdev)

struct intel_reset_soc {
	bool legacy;
	u32 reset_cell_count;
};

struct intel_reset_data {
	struct reset_controller_dev rcdev;
	struct notifier_block restart_nb;
	const struct intel_reset_soc *soc_data;
	struct regmap *regmap;
	struct device *dev;
	u32 reboot_id;
};

static const struct regmap_config intel_rcu_regmap_config = {
	.name =		"intel-reset",
	.reg_bits =	32,
	.reg_stride =	4,
	.val_bits =	32,
	.fast_io =	true,
};

/*
 * The reset status register for a control register at offset X is
 * normally found at X + 4. The one exception is the legacy RCU_RST_REQ
 * register, whose status bits live in RCU_RST_STAT.
 */
static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
				     unsigned long id, u32 *rst_req,
				     u32 *req_bit, u32 *stat_bit)
{
	*rst_req = FIELD_GET(REG_OFFSET_MASK, id);
	*req_bit = FIELD_GET(BIT_OFFSET_MASK, id);

	if (data->soc_data->legacy)
		*stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
	else
		*stat_bit = *req_bit;

	if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
		return RCU_RST_STAT;
	else
		return *rst_req + 0x4;
}

static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
			      bool set)
{
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);

	val = set ? BIT(req_bit) : 0;
	ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
	if (ret)
		return ret;

	return regmap_read_poll_timeout(data->regmap, rst_stat, val,
					set == !!(val & BIT(stat_bit)), 20,
					200);
}

static int intel_assert_device(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, true);
	if (ret)
		dev_err(data->dev, "Reset assert failed %d\n", ret);

	return ret;
}

static int intel_deassert_device(struct reset_controller_dev *rcdev,
				 unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, false);
	if (ret)
		dev_err(data->dev, "Reset deassert failed %d\n", ret);

	return ret;
}

static int intel_reset_status(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);
	ret = regmap_read(data->regmap, rst_stat, &val);
	if (ret)
		return ret;

	return !!(val & BIT(stat_bit));
}

static const struct reset_control_ops intel_reset_ops = {
	.assert =	intel_assert_device,
	.deassert =	intel_deassert_device,
	.status	=	intel_reset_status,
};

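/*
 * Translate a DT reset specifier into the packed ID format described at
 * the top of this file. As a purely illustrative consumer fragment
 * (label and values are hypothetical, not taken from a real binding
 * example), a non-legacy SoC uses two cells, register offset and
 * request bit:
 *
 *	resets = <&rcu0 0x30 21>;
 *
 * Legacy SoCs add a third cell for the separate status bit.
 */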
static int intel_reset_xlate(struct reset_controller_dev *rcdev,
			     const struct of_phandle_args *spec)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 id;

	if (spec->args[1] > 31)
		return -EINVAL;

	id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
	id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);

	if (data->soc_data->legacy) {
		if (spec->args[2] > 31)
			return -EINVAL;

		id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
	}

	return id;
}

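/*
 * On system restart, assert the global reset line that was encoded from
 * the "intel,global-reset" property at probe time.
 */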
static int intel_reset_restart_handler(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct intel_reset_data *reset_data;

	reset_data = container_of(nb, struct intel_reset_data, restart_nb);
	intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);

	return NOTIFY_DONE;
}

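/*
 * Illustrative controller node (addresses and cell values are examples
 * only; consult the devicetree binding for the authoritative format):
 *
 *	rcu0: reset-controller@e0000000 {
 *		compatible = "intel,rcu-lgm";
 *		reg = <0xe0000000 0x20000>;
 *		intel,global-reset = <0x10 30>;
 *		#reset-cells = <2>;
 *	};
 */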
static int intel_reset_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct intel_reset_data *data;
	void __iomem *base;
	u32 rb_id[3];
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->soc_data = of_device_get_match_data(dev);
	if (!data->soc_data)
		return -ENODEV;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	data->regmap = devm_regmap_init_mmio(dev, base,
					     &intel_rcu_regmap_config);
	if (IS_ERR(data->regmap)) {
		dev_err(dev, "regmap initialization failed\n");
		return PTR_ERR(data->regmap);
	}

	ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
					     data->soc_data->reset_cell_count);
	if (ret) {
		dev_err(dev, "Failed to get global reset offset!\n");
		return ret;
	}

	data->dev = dev;
	data->rcdev.of_node = np;
	data->rcdev.owner = dev->driver->owner;
	data->rcdev.ops = &intel_reset_ops;
	data->rcdev.of_xlate = intel_reset_xlate;
	data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count;
	ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
	if (ret)
		return ret;

	data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
	data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);

	if (data->soc_data->legacy)
		data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);

	data->restart_nb.notifier_call = intel_reset_restart_handler;
	data->restart_nb.priority = 128;
	register_restart_handler(&data->restart_nb);

	return 0;
}

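/*
 * Per-SoC match data: xRX200 uses the legacy three-cell specifier with a
 * separate status bit, while LGM uses two cells and the status bit
 * position tracks the request bit.
 */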
static const struct intel_reset_soc xrx200_data = {
	.legacy =		true,
	.reset_cell_count =	3,
};

static const struct intel_reset_soc lgm_data = {
	.legacy =		false,
	.reset_cell_count =	2,
};

static const struct of_device_id intel_reset_match[] = {
	{ .compatible = "intel,rcu-lgm", .data = &lgm_data },
	{ .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
	{}
};

static struct platform_driver intel_reset_driver = {
	.probe = intel_reset_probe,
	.driver = {
		.name = "intel-reset",
		.of_match_table = intel_reset_match,
	},
};

static int __init intel_reset_init(void)
{
	return platform_driver_register(&intel_reset_driver);
}

/*
 * The RCU is a system-core block in the Always-On domain; its clocks and
 * resources are set up during system-core initialization. Most platform-
 * and architecture-specific devices also need to perform a reset as part
 * of their own initialization, so register this driver at postcore
 * initcall time.
 */
postcore_initcall(intel_reset_init);