// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) Support
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Zhang Yi <yi.z.zhang@intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#include <linux/fpga-dfl.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include "dfl.h"

static DEFINE_MUTEX(dfl_id_mutex);

/*
 * When adding support for a new feature dev to the DFL framework, add a new
 * item to enum dfl_id_type and provide the related information in the
 * dfl_devs table below, which is indexed by dfl_id_type, e.g. the name
 * string used for platform device creation (define name strings in dfl.h,
 * as they could be reused by platform device drivers).
 *
 * If the new feature dev needs chardev support, also add a new item to the
 * dfl_chrdevs table and set dfl_devs[i].devt_type to its index in the
 * dfl_chrdevs table. If no chardev support is needed, set devt_type to the
 * invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};

/**
 * struct dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register, as defined by
 *	    the DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u16 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
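
/*
 * A purely illustrative sketch (not part of this driver) of how the table
 * above would grow for a hypothetical new feature dev "foo" without chardev
 * support: add FOO_ID to enum dfl_id_type, define DFL_FPGA_FEATURE_DEV_FOO
 * and DFH_ID_FIU_FOO in dfl.h, then append:
 *
 *	{.name = DFL_FPGA_FEATURE_DEV_FOO, .dfh_id = DFH_ID_FIU_FOO,
 *	 .devt_type = DFL_FPGA_DEVT_MAX},
 */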

/**
 * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};

static void dfl_ids_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_init(&dfl_devs[i].id);
}

static void dfl_ids_destroy(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_destroy(&dfl_devs[i].id);
}

static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
{
	int id;

	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dfl_id_mutex);

	return id;
}

static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}

static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (!strcmp(dfl_devs[i].name, pdev->name))
			return i;

	return DFL_ID_MAX;
}

static enum dfl_id_type dfh_id_to_type(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (dfl_devs[i].dfh_id == id)
			return i;

	return DFL_ID_MAX;
}

/*
 * Introduce a global port_ops list which allows port drivers to register
 * their ops; other feature devices (e.g. FME) can then use the port functions
 * even when the related port platform device is hidden. One example is the
 * virtualization case of a PCIe-based FPGA DFL device: when SRIOV is enabled,
 * a port (and its AFU) is turned into a VF and the port platform device is
 * hidden from the system, but the port still needs to be accessed to finish
 * the FPGA reconfiguration function in the FME.
 */

static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
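
/*
 * A minimal registration sketch: a port driver (e.g. dfl-afu) is expected to
 * add its ops to this list at module init and remove them at module exit;
 * port_get_id() is an illustrative callback name.
 *
 *	static struct dfl_fpga_port_ops afu_port_ops = {
 *		.name = DFL_FPGA_FEATURE_DEV_PORT,
 *		.owner = THIS_MODULE,
 *		.get_id = port_get_id,
 *	};
 *
 *	dfl_fpga_port_ops_add(&afu_port_ops);
 *	...
 *	dfl_fpga_port_ops_del(&afu_port_ops);
 */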

/**
 * dfl_fpga_port_ops_get - get matched port ops from the global list
 * @pdev: platform device to match with associated port ops.
 *
 * Please note that dfl_fpga_port_ops_put() must be called after use of the
 * returned port_ops.
 *
 * Return: matched port ops on success, NULL otherwise.
 */
struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
{
	struct dfl_fpga_port_ops *ops = NULL;

	mutex_lock(&dfl_port_ops_mutex);
	if (list_empty(&dfl_port_ops_list))
		goto done;

	list_for_each_entry(ops, &dfl_port_ops_list, node) {
		/* match port_ops using the name of platform device */
		if (!strcmp(pdev->name, ops->name)) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			goto done;
		}
	}

	ops = NULL;
done:
	mutex_unlock(&dfl_port_ops_mutex);
	return ops;
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);

/**
 * dfl_fpga_port_ops_put - put port ops
 * @ops: port ops.
 */
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
{
	if (ops && ops->owner)
		module_put(ops->owner);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);

/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);

/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to delete.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);

/**
 * dfl_fpga_check_port_id - check the port id
 * @pdev: port platform device.
 * @pport_id: pointer to the port id to compare with.
 *
 * Return: 1 if the port device matches the given port id, otherwise 0.
 */
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_port_ops *port_ops;

	if (pdata->id != FEATURE_DEV_ID_UNUSED)
		return pdata->id == *(int *)pport_id;

	port_ops = dfl_fpga_port_ops_get(pdev);
	if (!port_ops || !port_ops->get_id)
		return 0;

	pdata->id = port_ops->get_id(pdev);
	dfl_fpga_port_ops_put(port_ops);

	return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
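
/*
 * dfl_fpga_check_port_id() is shaped as a match callback; a typical call
 * site (sketch, assuming a struct dfl_fpga_cdev *cdev and an int port_id):
 *
 *	pdev = dfl_fpga_cdev_find_port(cdev, &port_id, dfl_fpga_check_port_id);
 */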

static DEFINE_IDA(dfl_device_ida);

static const struct dfl_device_id *
dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
{
	if (id->type == ddev->type && id->feature_id == ddev->feature_id)
		return id;

	return NULL;
}

static int dfl_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dfl_device *ddev = to_dfl_dev(dev);
	struct dfl_driver *ddrv = to_dfl_drv(drv);
	const struct dfl_device_id *id_entry;

	id_entry = ddrv->id_table;
	if (id_entry) {
		while (id_entry->feature_id) {
			if (dfl_match_one_device(id_entry, ddev)) {
				ddev->id_entry = id_entry;
				return 1;
			}
			id_entry++;
		}
	}

	return 0;
}

static int dfl_bus_probe(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	return ddrv->probe(ddev);
}

static int dfl_bus_remove(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddrv->remove)
		ddrv->remove(ddev);

	return 0;
}

static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	/* The type has 4 valid bits and feature_id has 12 valid bits */
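	/* e.g. type 0x1 with feature_id 0x10 produces MODALIAS "dfl:t1f010" */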
	return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
			      ddev->type, ddev->feature_id);
}

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->type);
}
static DEVICE_ATTR_RO(type);

static ssize_t
feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->feature_id);
}
static DEVICE_ATTR_RO(feature_id);

static struct attribute *dfl_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_feature_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dfl_dev);
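
/*
 * These attributes show up on dfl bus devices in sysfs, typically as
 * /sys/bus/dfl/devices/dfl_dev.N/{type,feature_id}.
 */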

static struct bus_type dfl_bus_type = {
	.name = "dfl",
	.match = dfl_bus_match,
	.probe = dfl_bus_probe,
	.remove = dfl_bus_remove,
	.uevent = dfl_bus_uevent,
	.dev_groups = dfl_dev_groups,
};

static void release_dfl_dev(struct device *dev)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddev->mmio_res.parent)
		release_resource(&ddev->mmio_res);

	ida_simple_remove(&dfl_device_ida, ddev->id);
	kfree(ddev->irqs);
	kfree(ddev);
}

static struct dfl_device *
dfl_dev_add(struct dfl_feature_platform_data *pdata,
	    struct dfl_feature *feature)
{
	struct platform_device *pdev = pdata->dev;
	struct resource *parent_res;
	struct dfl_device *ddev;
	int id, i, ret;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(&pdev->dev, "unable to get id\n");
		kfree(ddev);
		return ERR_PTR(id);
	}

	/* freeing resources by put_device() after device_initialize() */
	device_initialize(&ddev->dev);
	ddev->dev.parent = &pdev->dev;
	ddev->dev.bus = &dfl_bus_type;
	ddev->dev.release = release_dfl_dev;
	ddev->id = id;
	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
	if (ret)
		goto put_dev;

	ddev->type = feature_dev_id_type(pdev);
	ddev->feature_id = feature->id;
	ddev->cdev = pdata->dfl_cdev;

	/* add mmio resource */
	parent_res = &pdev->resource[feature->resource_index];
	ddev->mmio_res.flags = IORESOURCE_MEM;
	ddev->mmio_res.start = parent_res->start;
	ddev->mmio_res.end = parent_res->end;
	ddev->mmio_res.name = dev_name(&ddev->dev);
	ret = insert_resource(parent_res, &ddev->mmio_res);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
			dev_name(&ddev->dev), &ddev->mmio_res);
		goto put_dev;
	}

	/* then add irq resource */
	if (feature->nr_irqs) {
		ddev->irqs = kcalloc(feature->nr_irqs,
				     sizeof(*ddev->irqs), GFP_KERNEL);
		if (!ddev->irqs) {
			ret = -ENOMEM;
			goto put_dev;
		}

		for (i = 0; i < feature->nr_irqs; i++)
			ddev->irqs[i] = feature->irq_ctx[i].irq;

		ddev->num_irqs = feature->nr_irqs;
	}

	ret = device_add(&ddev->dev);
	if (ret)
		goto put_dev;

	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
	return ddev;

put_dev:
	/* calls release_dfl_dev() which does the clean up */
	put_device(&ddev->dev);
	return ERR_PTR(ret);
}

static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ddev) {
			device_unregister(&feature->ddev->dev);
			feature->ddev = NULL;
		}
	}
}

static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;
	struct dfl_device *ddev;
	int ret;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ioaddr)
			continue;

		if (feature->ddev) {
			ret = -EEXIST;
			goto err;
		}

		ddev = dfl_dev_add(pdata, feature);
		if (IS_ERR(ddev)) {
			ret = PTR_ERR(ddev);
			goto err;
		}

		feature->ddev = ddev;
	}

	return 0;

err:
	dfl_devs_remove(pdata);
	return ret;
}

int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
{
	if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
		return -EINVAL;

	dfl_drv->drv.owner = owner;
	dfl_drv->drv.bus = &dfl_bus_type;

	return driver_register(&dfl_drv->drv);
}
EXPORT_SYMBOL(__dfl_driver_register);

void dfl_driver_unregister(struct dfl_driver *dfl_drv)
{
	driver_unregister(&dfl_drv->drv);
}
EXPORT_SYMBOL(dfl_driver_unregister);
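
/*
 * A minimal sketch of a dfl bus driver registration (names are hypothetical;
 * real drivers usually use the module_dfl_driver() helper, which passes
 * THIS_MODULE to __dfl_driver_register()):
 *
 *	static const struct dfl_device_id example_ids[] = {
 *		{ FME_ID, 0x10 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(dfl, example_ids);
 *
 *	static struct dfl_driver example_driver = {
 *		.drv	  = { .name = "dfl-example" },
 *		.id_table = example_ids,
 *		.probe	  = example_probe,
 *	};
 *	module_dfl_driver(example_driver);
 */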

#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)

/**
 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
 * @pdev: feature device.
 */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature *feature;

	dfl_devs_remove(pdata);

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ops) {
			if (feature->ops->uinit)
				feature->ops->uinit(pdev, feature);
			feature->ops = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);

static int dfl_feature_instance_init(struct platform_device *pdev,
				     struct dfl_feature_platform_data *pdata,
				     struct dfl_feature *feature,
				     struct dfl_feature_driver *drv)
{
	void __iomem *base;
	int ret = 0;

	if (!is_header_feature(feature)) {
		base = devm_platform_ioremap_resource(pdev,
						      feature->resource_index);
		if (IS_ERR(base)) {
			dev_err(&pdev->dev,
				"ioremap failed for feature 0x%x!\n",
				feature->id);
			return PTR_ERR(base);
		}

		feature->ioaddr = base;
	}

	if (drv->ops->init) {
		ret = drv->ops->init(pdev, feature);
		if (ret)
			return ret;
	}

	feature->ops = drv->ops;

	return ret;
}

static bool dfl_feature_drv_match(struct dfl_feature *feature,
				  struct dfl_feature_driver *driver)
{
	const struct dfl_feature_id *ids = driver->id_table;

	if (ids) {
		while (ids->id) {
			if (ids->id == feature->id)
				return true;
			ids++;
		}
	}
	return false;
}

/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function matches sub features against the given feature drvs list and
 * uses the matched drv to initialize the related sub feature.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	ret = dfl_devs_add(pdata);
	if (ret)
		goto exit;

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);

static void dfl_chardev_uinit(void)
{
	int i;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
		if (MAJOR(dfl_chrdevs[i].devt)) {
			unregister_chrdev_region(dfl_chrdevs[i].devt,
						 MINORMASK + 1);
			dfl_chrdevs[i].devt = MKDEV(0, 0);
		}
}

static int dfl_chardev_init(void)
{
	int i, ret;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
					  MINORMASK + 1, dfl_chrdevs[i].name);
		if (ret)
			goto exit;
	}

	return 0;

exit:
	dfl_chardev_uinit();
	return ret;
}

static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
{
	if (type >= DFL_FPGA_DEVT_MAX)
		return 0;

	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
}

/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * Set the parent to the feature device so that its refcount is
	 * decreased only after the last reference to the cdev is gone; this
	 * makes sure the feature device stays valid during the device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
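
/*
 * A minimal usage sketch (illustrative fops name): feature dev drivers such
 * as dfl-fme/dfl-afu typically register their cdev ops near the end of probe
 * and unregister them in remove:
 *
 *	ret = dfl_fpga_dev_ops_register(pdev, &example_fops, THIS_MODULE);
 *	...
 *	dfl_fpga_dev_ops_unregister(pdev);
 */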
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * @pdev: feature dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) cdev_del(&pdata->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * struct build_feature_devs_info - info collected during feature dev build.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * @dev: device to enumerate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @cdev: the container device for all feature devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * @nr_irqs: number of irqs for all feature devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * @feature_dev: current feature device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * @ioaddr: header register region address of current FIU in enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * @start: register resource start of current FIU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * @len: max register resource length of current FIU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * @sub_features: a sub features linked list for feature device in enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * @feature_num: number of sub features for feature device in enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) struct build_feature_devs_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct dfl_fpga_cdev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) unsigned int nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) int *irq_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct platform_device *feature_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) void __iomem *ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) resource_size_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) resource_size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) struct list_head sub_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) int feature_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * struct dfl_feature_info - sub feature info collected during feature dev build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * @fid: id of this sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * @mmio_res: mmio resource of this sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * @ioaddr: mapped base address of mmio resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * @node: node in sub_features linked list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * @irq_base: start of irq index in this sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * @nr_irqs: number of irqs of this sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct dfl_feature_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) u16 fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct resource mmio_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) void __iomem *ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) unsigned int irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) unsigned int nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) struct platform_device *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) list_add(&pdata->node, &cdev->port_dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) get_device(&pdata->dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * register current feature device, it is called when we need to switch to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * another feature parsing or we have parsed all features on given device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * feature list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) static int build_info_commit_dev(struct build_feature_devs_info *binfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct platform_device *fdev = binfo->feature_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct dfl_feature_info *finfo, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) enum dfl_id_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int ret, index = 0, res_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) type = feature_dev_id_type(fdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (WARN_ON_ONCE(type >= DFL_ID_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * we do not need to care for the memory which is associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * the platform device. After calling platform_device_unregister(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * it will be automatically freed by device's release() callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * platform_device_release().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) pdata->dev = fdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) pdata->num = binfo->feature_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) pdata->dfl_cdev = binfo->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) pdata->id = FEATURE_DEV_ID_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) mutex_init(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) dfl_pdata_key_strings[type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * the count should be initialized to 0 to make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) *__fpga_port_enable() following __fpga_port_disable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * works properly for port device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * and it should always be 0 for fme device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) WARN_ON(pdata->disable_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) fdev->dev.platform_data = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* each sub feature has one MMIO resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) fdev->num_resources = binfo->feature_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (!fdev->resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* fill features and resource information for feature dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct dfl_feature *feature = &pdata->features[index++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct dfl_feature_irq_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* save resource information for each feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) feature->dev = fdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) feature->id = finfo->fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * the FIU header feature has some fundamental functions (sriov
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * set, port enable/disable) needed for the dfl bus device and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * other sub features. So its mmio resource should be mapped by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * DFL bus device. And we should not assign it to feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * devices (dfl-fme/afu) again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (is_header_feature(feature)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) feature->resource_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) feature->ioaddr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) devm_ioremap_resource(binfo->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) &finfo->mmio_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (IS_ERR(feature->ioaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return PTR_ERR(feature->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) feature->resource_index = res_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) fdev->resource[res_idx++] = finfo->mmio_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (finfo->nr_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) for (i = 0; i < finfo->nr_irqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ctx[i].irq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) binfo->irq_table[finfo->irq_base + i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) feature->irq_ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) feature->nr_irqs = finfo->nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) list_del(&finfo->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) kfree(finfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ret = platform_device_add(binfo->feature_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (type == PORT_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dfl_fpga_cdev_add_port_dev(binfo->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) binfo->feature_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) binfo->cdev->fme_dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) get_device(&binfo->feature_dev->dev);
		/*
		 * Reset binfo->feature_dev to avoid build_info_free()
		 * freeing its resources.
		 *
		 * The resources of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) binfo->feature_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) build_info_create_dev(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) enum dfl_id_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct platform_device *fdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (type >= DFL_ID_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
	/*
	 * Use -ENODEV as the initial id; it indicates that no valid id has
	 * been allocated yet, so there is nothing to reclaim on failure.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!fdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) binfo->feature_dev = fdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) binfo->feature_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) INIT_LIST_HEAD(&binfo->sub_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fdev->id = dfl_id_alloc(type, &fdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (fdev->id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return fdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) fdev->dev.parent = &binfo->cdev->region->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static void build_info_free(struct build_feature_devs_info *binfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct dfl_feature_info *finfo, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
	/*
	 * If it is a valid id, free it. See the comments in
	 * build_info_create_dev().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dfl_id_free(feature_dev_id_type(binfo->feature_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) binfo->feature_dev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_del(&finfo->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) kfree(finfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) platform_device_put(binfo->feature_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) devm_kfree(binfo->dev, binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static inline u32 feature_size(void __iomem *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) u64 v = readq(start + DFH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
	/* workaround for private features with an invalid size: use 4K instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return ofst ? ofst : 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static u16 feature_id(void __iomem *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) u64 v = readq(start + DFH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u16 id = FIELD_GET(DFH_ID, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) u8 type = FIELD_GET(DFH_TYPE, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (type == DFH_TYPE_FIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return FEATURE_ID_FIU_HEADER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) else if (type == DFH_TYPE_PRIVATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) else if (type == DFH_TYPE_AFU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return FEATURE_ID_AFU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int parse_feature_irqs(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) resource_size_t ofst, u16 fid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) unsigned int *irq_base, unsigned int *nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) void __iomem *base = binfo->ioaddr + ofst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) unsigned int i, ibase, inr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
	/*
	 * Ideally the DFL framework should only read information from the
	 * DFL header, but the current DFL version only provides MMIO
	 * resource information for each feature in the DFL header; there is
	 * no field for interrupt resources. Interrupt resource information
	 * is provided by specific MMIO registers of each private feature
	 * which supports interrupts, so in order to parse and assign irq
	 * resources, the DFL framework has to look into the capability
	 * registers of these private features.
	 *
	 * Once a future DFL version supports generic interrupt resource
	 * information in the common DFL headers, generic interrupt parsing
	 * code will be added. But in order to stay compatible with the old
	 * DFL version, the driver may still fall back to these quirks.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) switch (fid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case PORT_FEATURE_ID_UINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) v = readq(base + PORT_UINT_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) case PORT_FEATURE_ID_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) v = readq(base + PORT_ERROR_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) case FME_FEATURE_ID_GLOBAL_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) v = readq(base + FME_ERROR_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!inr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *irq_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *nr_irqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) fid, ibase, inr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (ibase + inr > binfo->nr_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev_err(binfo->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) "Invalid interrupt number in feature 0x%x\n", fid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) for (i = 0; i < inr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) virq = binfo->irq_table[ibase + i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (virq < 0 || virq > NR_IRQS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) dev_err(binfo->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) "Invalid irq table entry for feature 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) fid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *irq_base = ibase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) *nr_irqs = inr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
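
/*
 * Worked example (hypothetical register values, for illustration only):
 * if a Port user-interrupt feature reports PORT_UINT_CAP_FST_VECT = 4 and
 * PORT_UINT_CAP_INT_NUM = 2, parse_feature_irqs() returns irq_base = 4 and
 * nr_irqs = 2, so the feature is later wired to binfo->irq_table[4] and
 * binfo->irq_table[5] - provided binfo->nr_irqs is at least 6 and both
 * table entries hold valid Linux IRQ numbers.
 */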
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
/*
 * When creating sub feature instances, private features don't need to
 * provide the resource size and feature id, as they can be read from the
 * DFH register. For an afu sub feature, its register region only contains
 * user defined registers, so never trust any information from it; just use
 * the resource size information provided by its parent FIU.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) create_feature_instance(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) resource_size_t ofst, resource_size_t size, u16 fid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) unsigned int irq_base, nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct dfl_feature_info *finfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
	/* read the feature size and id from DFH if the inputs are zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) size = size ? size : feature_size(binfo->ioaddr + ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) fid = fid ? fid : feature_id(binfo->ioaddr + ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (binfo->len - ofst < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!finfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) finfo->fid = fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) finfo->mmio_res.start = binfo->start + ofst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) finfo->mmio_res.flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) finfo->irq_base = irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) finfo->nr_irqs = nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) list_add_tail(&finfo->node, &binfo->sub_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) binfo->feature_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
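
/*
 * Usage note: callers below follow two patterns. For an AFU, the size comes
 * from the parent FIU and the id is fixed:
 *
 *	create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
 *
 * For a FIU header or a private feature, zeros are passed so that both size
 * and id are read from the DFH register at @ofst:
 *
 *	create_feature_instance(binfo, ofst, 0, 0);
 */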
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) resource_size_t ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) WARN_ON(!size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) #define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static int parse_feature_afu(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) resource_size_t ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!is_feature_dev_detected(binfo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) switch (feature_dev_id_type(binfo->feature_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) case PORT_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return parse_feature_port_afu(binfo, ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) binfo->feature_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static int build_info_prepare(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) resource_size_t start, resource_size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct device *dev = binfo->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) void __iomem *ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dev_err(dev, "request region fail, start:%pa, len:%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) &start, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ioaddr = devm_ioremap(dev, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!ioaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) &start, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) binfo->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) binfo->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) binfo->ioaddr = ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void build_info_complete(struct build_feature_devs_info *binfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) devm_iounmap(binfo->dev, binfo->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static int parse_feature_fiu(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) resource_size_t ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) u16 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (is_feature_dev_detected(binfo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) build_info_complete(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ret = build_info_commit_dev(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ret = build_info_prepare(binfo, binfo->start + ofst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) binfo->len - ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) v = readq(binfo->ioaddr + DFH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) id = FIELD_GET(DFH_ID, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* create platform device for dfl feature dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = build_info_create_dev(binfo, dfh_id_to_type(id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ret = create_feature_instance(binfo, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return ret;

	/*
	 * Find and parse the FIU's child AFU via its NEXT_AFU register.
	 * Please note that per the spec only the Port FIU has a valid
	 * NEXT_AFU pointer.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) v = readq(binfo->ioaddr + NEXT_AFU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return parse_feature_afu(binfo, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int parse_feature_private(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) resource_size_t ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "the private feature 0x%x does not belong to any FIU.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) feature_id(binfo->ioaddr + ofst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return create_feature_instance(binfo, ofst, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * parse_feature - parse a feature on given device feature list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * @binfo: build feature devices information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * @ofst: offset to current FIU header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int parse_feature(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) resource_size_t ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) v = readq(binfo->ioaddr + ofst + DFH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) type = FIELD_GET(DFH_TYPE, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) case DFH_TYPE_AFU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return parse_feature_afu(binfo, ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) case DFH_TYPE_PRIVATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return parse_feature_private(binfo, ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) case DFH_TYPE_FIU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return parse_feature_fiu(binfo, ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) dev_info(binfo->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) "Feature Type %x is not supported.\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static int parse_feature_list(struct build_feature_devs_info *binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) resource_size_t start, resource_size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) resource_size_t end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) u32 ofst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret = build_info_prepare(binfo, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* walk through the device feature list via DFH's next DFH pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) for (; start < end; start += ofst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (end - start < DFH_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) dev_err(binfo->dev, "The region is too small to contain a feature.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ret = parse_feature(binfo, start - binfo->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) v = readq(binfo->ioaddr + start - binfo->start + DFH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
		/* stop parsing if EOL (End of List) is set or the offset is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if ((v & DFH_EOL) || !ofst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
	/* commit the current feature device when the end of the list is reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) build_info_complete(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (is_feature_dev_detected(binfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ret = build_info_commit_dev(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
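
/*
 * Example walk (a hypothetical layout, for illustration only): a DFL with an
 * FME FIU header at offset 0x0, a private feature at offset 0x1000 and a
 * last private feature with DFH_EOL set at offset 0x2000 is parsed as
 *
 *	parse_feature(binfo, 0x0);	-> parse_feature_fiu()
 *	parse_feature(binfo, 0x1000);	-> parse_feature_private()
 *	parse_feature(binfo, 0x2000);	-> parse_feature_private(), then
 *					   DFH_EOL stops the loop
 *
 * after which the pending feature device is committed by
 * build_info_commit_dev().
 */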
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct dfl_fpga_enum_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) info->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) INIT_LIST_HEAD(&info->dfls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct dfl_fpga_enum_dfl *tmp, *dfl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
	/* remove all device feature list entries from the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) list_del(&dfl->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) devm_kfree(dev, dfl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /* remove irq table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (info->irq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) devm_kfree(dev, info->irq_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) devm_kfree(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * @info: ptr to dfl_fpga_enum_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * @start: mmio resource address of the device feature list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * @len: mmio resource length of the device feature list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * One FPGA device may have one or more Device Feature Lists (DFLs), use this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * function to add information of each DFL to common data structure for next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * step enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) resource_size_t start, resource_size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct dfl_fpga_enum_dfl *dfl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!dfl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dfl->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dfl->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) list_add_tail(&dfl->node, &info->dfls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * dfl_fpga_enum_info_add_irq - add irq table to enum info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * @info: ptr to dfl_fpga_enum_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * One FPGA device may have several interrupts. This function adds irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * information of the DFL fpga device to enum info for next step enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * This function should be called before dfl_fpga_feature_devs_enumerate().
 * As only one irq domain is supported for all DFLs in the same enum info,
 * adding an irq table a second time for the same enum info returns an error.
 *
 * DFLs which belong to different irq domains should be described by separate
 * enum infos and enumerated one by one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) unsigned int nr_irqs, int *irq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (!nr_irqs || !irq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (info->irq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) info->irq_table = devm_kmemdup(info->dev, irq_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) sizeof(int) * nr_irqs, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (!info->irq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) info->nr_irqs = nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static int remove_feature_dev(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) enum dfl_id_type type = feature_dev_id_type(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) int id = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) platform_device_unregister(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) dfl_id_free(type, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * dfl_fpga_feature_devs_enumerate - enumerate feature devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * @info: information for enumeration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * This function creates a container device (base FPGA region), enumerates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * feature devices based on the enumeration info and creates platform devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * under the container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Return: dfl_fpga_cdev struct on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct dfl_fpga_cdev *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct build_feature_devs_info *binfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct dfl_fpga_enum_dfl *dfl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct dfl_fpga_cdev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (!info->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (!cdev->region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) goto free_cdev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) cdev->parent = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) mutex_init(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) INIT_LIST_HEAD(&cdev->port_dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ret = fpga_region_register(cdev->region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) goto free_cdev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /* create and init build info for enumeration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!binfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) goto unregister_region_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) binfo->dev = info->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) binfo->cdev = cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) binfo->nr_irqs = info->nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (info->nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) binfo->irq_table = info->irq_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * start enumeration for all feature devices based on Device Feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) list_for_each_entry(dfl, &info->dfls, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) ret = parse_feature_list(binfo, dfl->start, dfl->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) remove_feature_devs(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) build_info_free(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto unregister_region_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) build_info_free(binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) unregister_region_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) fpga_region_unregister(cdev->region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) free_cdev_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) devm_kfree(info->dev, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * dfl_fpga_feature_devs_remove - remove all feature devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * @cdev: fpga container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) *
 * Remove the container device and all feature devices under the given
 * container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct dfl_feature_platform_data *pdata, *ptmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (cdev->fme_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) put_device(cdev->fme_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct platform_device *port_dev = pdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* remove released ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (!device_is_registered(&port_dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) dfl_id_free(feature_dev_id_type(port_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) port_dev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) platform_device_put(port_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) list_del(&pdata->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) put_device(&port_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) remove_feature_devs(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) fpga_region_unregister(cdev->region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) devm_kfree(cdev->parent, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
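
/*
 * Typical enumeration flow for a bus driver that discovers a DFL (a minimal
 * sketch; dev, dfl_start, dfl_len, nr_irqs and irq_table are assumptions of
 * the example, and error handling is shortened):
 *
 *	struct dfl_fpga_enum_info *info;
 *	struct dfl_fpga_cdev *cdev;
 *
 *	info = dfl_fpga_enum_info_alloc(dev);
 *	if (!info)
 *		return -ENOMEM;
 *
 *	ret = dfl_fpga_enum_info_add_dfl(info, dfl_start, dfl_len);
 *	if (!ret && nr_irqs)
 *		ret = dfl_fpga_enum_info_add_irq(info, nr_irqs, irq_table);
 *	if (ret)
 *		goto free_info;
 *
 *	cdev = dfl_fpga_feature_devs_enumerate(info);
 *	if (IS_ERR(cdev))
 *		ret = PTR_ERR(cdev);
 * free_info:
 *	dfl_fpga_enum_info_free(info);	// only needed during enumeration
 *
 *	// later, on teardown of the parent device:
 *	dfl_fpga_feature_devs_remove(cdev);
 */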
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * __dfl_fpga_cdev_find_port - find a port under given container device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * @cdev: container device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * @data: data passed to match function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * @match: match function used to find specific port from the port device list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) *
 * Find a port device under the container device. This function needs to be
 * invoked with cdev->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * Return: pointer to port's platform device if successful, NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * NOTE: you will need to drop the device reference with put_device() after use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct platform_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) __dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) int (*match)(struct platform_device *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct platform_device *port_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) list_for_each_entry(pdata, &cdev->port_dev_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) port_dev = pdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (match(port_dev, data) && get_device(&port_dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return port_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
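
/*
 * Example lookup (a minimal sketch mirroring dfl_fpga_cdev_release_port()
 * below; port_id is assumed to hold the id of the port to look up):
 *
 *	mutex_lock(&cdev->lock);
 *	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
 *					      dfl_fpga_check_port_id);
 *	mutex_unlock(&cdev->lock);
 *	if (port_pdev) {
 *		// the returned reference keeps the device alive here
 *		put_device(&port_pdev->dev);
 *	}
 */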
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) static int __init dfl_fpga_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) ret = bus_register(&dfl_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dfl_ids_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ret = dfl_chardev_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) dfl_ids_destroy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) bus_unregister(&dfl_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * dfl_fpga_cdev_release_port - release a port platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * @cdev: parent container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * @port_id: id of the port platform device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *
 * This function allows the user to release a port platform device. This is a
 * mandatory step before turning a port from PF into VF for SR-IOV support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct platform_device *port_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) dfl_fpga_check_port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (!port_pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) goto unlock_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!device_is_registered(&port_pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) goto put_dev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) pdata = dev_get_platdata(&port_pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) mutex_lock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) ret = dfl_feature_dev_use_begin(pdata, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) mutex_unlock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) goto put_dev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) platform_device_del(port_pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) cdev->released_port_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) put_dev_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) put_device(&port_pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) unlock_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * dfl_fpga_cdev_assign_port - assign a port platform device back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * @cdev: parent container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * @port_id: id of the port platform device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) *
 * This function allows the user to assign a port platform device back. This
 * is a mandatory step after disabling SR-IOV support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct platform_device *port_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) dfl_fpga_check_port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (!port_pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) goto unlock_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (device_is_registered(&port_pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) goto put_dev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ret = platform_device_add(port_pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) goto put_dev_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) pdata = dev_get_platdata(&port_pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) mutex_lock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) dfl_feature_dev_use_end(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) mutex_unlock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) cdev->released_port_num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) put_dev_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) put_device(&port_pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) unlock_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
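
/*
 * Example (a minimal sketch, not taken from an in-tree caller): a parent bus
 * driver that already holds a struct dfl_fpga_cdev pointer (e.g. the one
 * returned by dfl_fpga_feature_devs_enumerate()) releases port 0 before
 * turning on SR-IOV and assigns it back once SR-IOV has been disabled again.
 * Error handling is trimmed for brevity.
 *
 *	int err;
 *
 *	err = dfl_fpga_cdev_release_port(cdev, 0);
 *	if (err)
 *		return err;
 *
 *	// ... enable SR-IOV, run VFs, then disable SR-IOV ...
 *
 *	err = dfl_fpga_cdev_assign_port(cdev, 0);
 */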
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static void config_port_access_mode(struct device *fme_dev, int port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) bool is_vf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) v = readq(base + FME_HDR_PORT_OFST(port_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) v &= ~FME_PORT_OFST_ACC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) writeq(v, base + FME_HDR_PORT_OFST(port_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) #define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * @cdev: parent container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * This function is needed in the SR-IOV configuration routine. It is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * configure all released ports from VF access mode back to PF access mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) list_for_each_entry(pdata, &cdev->port_dev_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (device_is_registered(&pdata->dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) config_port_pf_mode(cdev->fme_dev, pdata->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
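
/*
 * Example (a minimal sketch, assuming a PCIe parent driver that owns "pcidev"
 * and the container device "cdev"): on the SR-IOV disable path the driver
 * first tears down the VFs and then moves the released ports back to PF
 * access mode.
 *
 *	pci_disable_sriov(pcidev);
 *	dfl_fpga_cdev_config_ports_pf(cdev);
 */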
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * @cdev: parent container device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * @num_vfs: VF device number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * This function is needed in the SR-IOV configuration routine. It is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * configure the released ports from PF access mode to VF access mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct dfl_feature_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) mutex_lock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * Multiple ports can't be turned into one VF device; each VF gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * exactly one port. If the released port count doesn't match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * requested VF count, reject the request with -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (cdev->released_port_num != num_vfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) list_for_each_entry(pdata, &cdev->port_dev_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (device_is_registered(&pdata->dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) config_port_vf_mode(cdev->fme_dev, pdata->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) mutex_unlock(&cdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
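
/*
 * Example (a minimal sketch of the SR-IOV enable path, assuming "cdev" and
 * "pcidev" are owned by the PCIe parent driver): the released ports are
 * switched to VF access mode before the VFs are created, and switched back
 * to PF access mode if VF creation fails.
 *
 *	ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
 *	if (ret)
 *		return ret;
 *
 *	ret = pci_enable_sriov(pcidev, num_vfs);
 *	if (ret) {
 *		dfl_fpga_cdev_config_ports_pf(cdev);
 *		return ret;
 *	}
 */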
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static irqreturn_t dfl_irq_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct eventfd_ctx *trigger = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) eventfd_signal(trigger, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) struct platform_device *pdev = feature->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct eventfd_ctx *trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) irq = feature->irq_ctx[idx].irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (feature->irq_ctx[idx].trigger) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) free_irq(irq, feature->irq_ctx[idx].trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) kfree(feature->irq_ctx[idx].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) eventfd_ctx_put(feature->irq_ctx[idx].trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) feature->irq_ctx[idx].trigger = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) feature->irq_ctx[idx].name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dev_name(&pdev->dev), feature->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (!feature->irq_ctx[idx].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) trigger = eventfd_ctx_fdget(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (IS_ERR(trigger)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) ret = PTR_ERR(trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto free_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ret = request_irq(irq, dfl_irq_handler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) feature->irq_ctx[idx].name, trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) feature->irq_ctx[idx].trigger = trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) eventfd_ctx_put(trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) free_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) kfree(feature->irq_ctx[idx].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * @feature: dfl sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * @start: start of irq index in this dfl sub feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * @count: number of irqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * @fds: eventfds to bind with irqs. The related irq is unbound if fds[n] is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * negative; "count" irqs are unbound if the fds pointer is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * Bind the given eventfds to irqs in this dfl sub feature. The related irq is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * unbound if fds[n] is negative; "count" irqs starting at "start" are unbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * if the fds pointer is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) unsigned int count, int32_t *fds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (unlikely(start + count < start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* exceeds nr_irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (start + count > feature->nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int fd = fds ? fds[i] : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ret = do_set_irq_trigger(feature, start + i, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) do_set_irq_trigger(feature, start + i, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
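
/*
 * Example (a minimal sketch, assuming "pdata" is the feature device's platform
 * data): a feature sub-driver can unbind every irq of a sub feature, e.g. on
 * uinit, by passing a NULL fds pointer.
 *
 *	mutex_lock(&pdata->lock);
 *	dfl_fpga_set_irq_triggers(feature, 0, feature->nr_irqs, NULL);
 *	mutex_unlock(&pdata->lock);
 */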
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * @pdev: the feature device which has the sub feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * @feature: the dfl sub feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * @arg: ioctl argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct dfl_feature *feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return put_user(feature->nr_irqs, (__u32 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
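
/*
 * Example (a minimal sketch; MY_FEATURE_GET_IRQ_NUM is a placeholder for the
 * sub feature's real ioctl command): a feature sub-driver's ioctl callback can
 * forward its "get irq number" command straight to this helper.
 *
 *	case MY_FEATURE_GET_IRQ_NUM:
 *		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
 */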
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * @pdev: the feature device which has the sub feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * @feature: the dfl sub feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * @arg: ioctl argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * Return: 0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct dfl_feature *feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct dfl_fpga_irq_set hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) s32 *fds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (!feature->nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) (hdr.start + hdr.count < hdr.start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) fds = memdup_user((void __user *)(arg + sizeof(hdr)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) hdr.count * sizeof(s32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (IS_ERR(fds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return PTR_ERR(fds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) mutex_lock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) mutex_unlock(&pdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) kfree(fds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
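
/*
 * Example (a minimal sketch; MY_FEATURE_SET_IRQ is a placeholder for the sub
 * feature's real ioctl command): the matching "set irq" command is forwarded
 * the same way. The helper validates the header, copies the fds array that
 * follows it in the ioctl argument and takes pdata->lock around the trigger
 * update.
 *
 *	case MY_FEATURE_SET_IRQ:
 *		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
 */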
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static void __exit dfl_fpga_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) dfl_chardev_uinit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dfl_ids_destroy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) bus_unregister(&dfl_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) module_init(dfl_fpga_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) module_exit(dfl_fpga_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) MODULE_AUTHOR("Intel Corporation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) MODULE_LICENSE("GPL v2");