// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
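
/*
 * Message layout (illustrative sketch, not a normative description of the
 * uapi): an RTM_GETDCB/RTM_SETDCB request carries a struct dcbmsg header
 * followed by top-level DCB_ATTR_* netlink attributes, e.g. for
 * DCB_CMD_GSTATE:
 *
 *	struct dcbmsg { .dcb_family = AF_UNSPEC, .cmd = DCB_CMD_GSTATE }
 *	DCB_ATTR_IFNAME		"eth0"	(NUL-terminated string; example name)
 *
 * The reply echoes the command and carries the requested data, here a u8
 * DCB_ATTR_STATE.  Nested payloads (PFC, PG, CAP, ...) are parsed against
 * the per-feature policies below inside each command handler.
 */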

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

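/*
 * dcb_app_list holds the application priority entries used by the generic
 * dcb_getapp()/dcb_setapp() helpers; dcb_lock serializes access to it.
 */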
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

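/*
 * Allocate a DCB netlink message and fill in the netlink and dcbmsg headers.
 * Returns the new skb, or NULL on allocation failure; when @nlhp is non-NULL
 * the nlmsghdr pointer is handed back so the caller can finish the message
 * later (e.g. with nlmsg_end()).
 */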
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}

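/*
 * The per-command handlers below share one signature: @tb holds the
 * already-parsed top-level DCB_ATTR_* attributes of the request, and @skb
 * is the pre-built reply message into which GET handlers put their result
 * attributes.
 */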
static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

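/*
 * Report the PFC configuration: each requested DCB_PFC_UP_ATTR_<n> (or all
 * eight priorities when DCB_PFC_UP_ATTR_ALL is present) is queried from the
 * driver and returned inside a nested DCB_ATTR_PFC_CFG attribute.
 */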
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
					  tb[DCB_ATTR_PFC_CFG],
					  dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

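/*
 * The permanent HW address reply always carries the full MAX_ADDR_LEN
 * buffer; bytes beyond the device's address length are left zeroed.
 */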
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}

static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
					  tb[DCB_ATTR_CAP], dcbnl_cap_nest,
					  NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else
			return -EINVAL;
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}

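/*
 * Look up the priority for an application (selector/protocol) entry.  The
 * driver's getapp() callback is used when available; otherwise the generic
 * dcb_getapp() table kept in this file is consulted.
 */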
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

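/*
 * Set the priority for an application entry, mirroring dcbnl_getapp(): the
 * driver's setapp() callback is preferred, with dcb_setapp() as fallback.
 * A CEE notification is emitted after the update.
 */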
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}

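/*
 * Common GET path for the priority group (PG) configuration.  @dir selects
 * the direction: 0 queries the Tx parameters, 1 the Rx parameters (see the
 * dcbnl_pgtx_getcfg()/dcbnl_pgrx_getcfg() wrappers below).
 */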
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX, data,
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start_noflag(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->setstate(netdev, value));
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
					  tb[DCB_ATTR_PFC_CFG],
					  dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}

static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}

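/*
 * Common SET path for the priority group (PG) configuration; @dir has the
 * same meaning as in __dcbnl_pg_getcfg() (0 = Tx, 1 = Rx).
 */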
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX,
						  pg_tb[i],
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
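
/*
 * Illustrative sketch (not part of this file): __dcbnl_pg_setcfg() passes
 * DCB_ATTR_VALUE_UNDEFINED for any per-TC field that was absent from the
 * request, so a driver callback such as setpgtccfgtx() should only update
 * the fields that are defined.  struct foo_priv and its members are
 * hypothetical.
 *
 *	static void foo_setpgtccfgtx(struct net_device *dev, int tc,
 *				     u8 prio_type, u8 pgid, u8 bw_pct,
 *				     u8 up_map)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (prio_type != DCB_ATTR_VALUE_UNDEFINED)
 *			priv->tx_tc[tc].prio_type = prio_type;
 *		if (pgid != DCB_ATTR_VALUE_UNDEFINED)
 *			priv->tx_tc[tc].pgid = pgid;
 *		if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
 *			priv->tx_tc[tc].bw_pct = bw_pct;
 *		if (up_map != DCB_ATTR_VALUE_UNDEFINED)
 *			priv->tx_tc[tc].up_map = up_map;
 *	}
 */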
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct nlattr *bcn_nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) u8 value_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) u32 value_integer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) bool getall = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!tb[DCB_ATTR_BCN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!netdev->dcbnl_ops->getbcnrp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) !netdev->dcbnl_ops->getbcncfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!bcn_nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (bcn_tb[DCB_BCN_ATTR_ALL])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) getall = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!getall && !bcn_tb[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) &value_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ret = nla_put_u8(skb, i, value_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) goto err_bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!getall && !bcn_tb[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) netdev->dcbnl_ops->getbcncfg(netdev, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) &value_integer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ret = nla_put_u32(skb, i, value_integer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto err_bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) nla_nest_end(skb, bcn_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) err_bcn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) nla_nest_cancel(skb, bcn_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u8 value_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u32 value_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (!tb[DCB_ATTR_BCN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!netdev->dcbnl_ops->setbcncfg ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) !netdev->dcbnl_ops->setbcnrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) value_byte = nla_get_u8(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) netdev->dcbnl_ops->setbcnrp(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) value_int = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) netdev->dcbnl_ops->setbcncfg(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) i, value_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return nla_put_u8(skb, DCB_ATTR_BCN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int app_nested_type, int app_info_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int app_entry_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct dcb_peer_app_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct dcb_app *table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u16 app_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
	/* Retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) err = ops->peer_getappinfo(netdev, &info, &app_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (!err && app_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) table = kmalloc_array(app_count, sizeof(struct dcb_app),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) err = ops->peer_getapptable(netdev, table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct nlattr *app;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
		/* Build the message; from here on the only possible failure
		 * is due to the skb size.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) app = nla_nest_start_noflag(skb, app_nested_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!app)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (app_info_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) nla_put(skb, app_info_type, sizeof(info), &info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) for (i = 0; i < app_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) &table[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) nla_nest_end(skb, app);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
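
/*
 * Illustrative sketch (not part of this file): dcbnl_build_peer_app()
 * expects peer_getappinfo() to report the number of entries in the peer
 * APP table and peer_getapptable() to copy exactly that many struct
 * dcb_app entries into the buffer it allocates.  struct foo_priv and its
 * members are hypothetical.
 *
 *	static int foo_peer_getappinfo(struct net_device *dev,
 *				       struct dcb_peer_app_info *info,
 *				       u16 *app_count)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		info->willing = priv->peer_app_willing;
 *		info->error = 0;
 *		*app_count = priv->peer_app_count;
 *		return 0;
 *	}
 *
 *	static int foo_peer_getapptable(struct net_device *dev,
 *					struct dcb_app *table)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		memcpy(table, priv->peer_app,
 *		       priv->peer_app_count * sizeof(*table));
 *		return 0;
 *	}
 */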
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct nlattr *ieee, *app;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int dcbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!ieee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ops->ieee_getets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct ieee_ets ets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) memset(&ets, 0, sizeof(ets));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) err = ops->ieee_getets(netdev, &ets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (ops->ieee_getmaxrate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct ieee_maxrate maxrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) memset(&maxrate, 0, sizeof(maxrate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) err = ops->ieee_getmaxrate(netdev, &maxrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sizeof(maxrate), &maxrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (ops->ieee_getqcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct ieee_qcn qcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) memset(&qcn, 0, sizeof(qcn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) err = ops->ieee_getqcn(netdev, &qcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) err = nla_put(skb, DCB_ATTR_IEEE_QCN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) sizeof(qcn), &qcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (ops->ieee_getqcnstats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct ieee_qcn_stats qcn_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) memset(&qcn_stats, 0, sizeof(qcn_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) err = ops->ieee_getqcnstats(netdev, &qcn_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) sizeof(qcn_stats), &qcn_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (ops->ieee_getpfc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct ieee_pfc pfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) memset(&pfc, 0, sizeof(pfc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) err = ops->ieee_getpfc(netdev, &pfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (ops->dcbnl_getbuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct dcbnl_buffer buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) memset(&buffer, 0, sizeof(buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) err = ops->dcbnl_getbuffer(netdev, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!app)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (itr->ifindex == netdev->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) &itr->app);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (netdev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dcbx = netdev->dcbnl_ops->getdcbx(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dcbx = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) nla_nest_end(skb, app);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* get peer info if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (ops->ieee_peer_getets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct ieee_ets ets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) memset(&ets, 0, sizeof(ets));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) err = ops->ieee_peer_getets(netdev, &ets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (ops->ieee_peer_getpfc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct ieee_pfc pfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) memset(&pfc, 0, sizeof(pfc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) err = ops->ieee_peer_getpfc(netdev, &pfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (ops->peer_getappinfo && ops->peer_getapptable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) err = dcbnl_build_peer_app(netdev, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) DCB_ATTR_IEEE_PEER_APP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) DCB_ATTR_IEEE_APP_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) DCB_ATTR_IEEE_APP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) nla_nest_end(skb, ieee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (dcbx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
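
/*
 * Illustrative sketch (not part of this file): dcbnl_ieee_fill() only emits
 * DCB_ATTR_IEEE_ETS when the driver's ieee_getets() callback succeeds, so a
 * minimal callback simply copies the device's current ETS state into the
 * caller-provided structure.  struct foo_priv and its members are
 * hypothetical.
 *
 *	static int foo_ieee_getets(struct net_device *dev,
 *				   struct ieee_ets *ets)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		int i;
 *
 *		ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
 *		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 *			ets->tc_tx_bw[i] = priv->ets_bw[i];
 *			ets->tc_tsa[i] = priv->ets_tsa[i];
 *			ets->prio_tc[i] = priv->prio_tc[i];
 *		}
 *		return 0;
 *	}
 */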
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) u8 pgid, up_map, prio, tc_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct nlattr *pg = nla_nest_start_noflag(skb, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!tc_nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) pgid = DCB_ATTR_VALUE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) prio = DCB_ATTR_VALUE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) tc_pct = DCB_ATTR_VALUE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) up_map = DCB_ATTR_VALUE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) &prio, &pgid, &tc_pct, &up_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) &prio, &pgid, &tc_pct, &up_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) nla_nest_end(skb, tc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) tc_pct = DCB_ATTR_VALUE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) &tc_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) &tc_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (nla_put_u8(skb, i, tc_pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) nla_nest_end(skb, pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct nlattr *cee, *app;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) int dcbx, i, err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) u8 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!cee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* local pg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) err = dcbnl_cee_pg_fill(skb, netdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) err = dcbnl_cee_pg_fill(skb, netdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* local pfc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (ops->getpfccfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) DCB_ATTR_CEE_PFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (!pfc_nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (nla_put_u8(skb, i, value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) nla_nest_end(skb, pfc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* local app */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (!app)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) goto dcb_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (itr->ifindex == netdev->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct nlattr *app_nest = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) DCB_ATTR_APP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (!app_nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) goto dcb_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) itr->app.selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) goto dcb_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) err = nla_put_u16(skb, DCB_APP_ATTR_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) itr->app.protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) goto dcb_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) itr->app.priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto dcb_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) nla_nest_end(skb, app_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) nla_nest_end(skb, app);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (netdev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) dcbx = netdev->dcbnl_ops->getdcbx(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dcbx = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* features flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (ops->getfeatcfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct nlattr *feat = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) DCB_ATTR_CEE_FEAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (!feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (!ops->getfeatcfg(netdev, i, &value) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) nla_put_u8(skb, i, value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) nla_nest_end(skb, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* peer info if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (ops->cee_peer_getpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct cee_pg pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) memset(&pg, 0, sizeof(pg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) err = ops->cee_peer_getpg(netdev, &pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (ops->cee_peer_getpfc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct cee_pfc pfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) memset(&pfc, 0, sizeof(pfc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err = ops->cee_peer_getpfc(netdev, &pfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (ops->peer_getappinfo && ops->peer_getapptable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) err = dcbnl_build_peer_app(netdev, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) DCB_ATTR_CEE_PEER_APP_TABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) DCB_ATTR_CEE_PEER_APP_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) DCB_ATTR_CEE_PEER_APP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) nla_nest_end(skb, cee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* DCBX state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (dcbx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) dcb_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static int dcbnl_notify(struct net_device *dev, int event, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) u32 seq, u32 portid, int dcbx_ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) err = dcbnl_ieee_fill(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) err = dcbnl_cee_fill(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /* Report error to broadcast listeners */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) rtnl_set_sk_err(net, RTNLGRP_DCB, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* End nlmsg and notify broadcast listeners */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) u32 seq, u32 portid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) EXPORT_SYMBOL(dcbnl_ieee_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) u32 seq, u32 portid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) EXPORT_SYMBOL(dcbnl_cee_notify);
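
/*
 * Illustrative sketch (not part of this file): a driver whose DCBX agent
 * renegotiates parameters outside of the rtnetlink path can use the two
 * helpers above to tell RTNLGRP_DCB listeners that the configuration
 * changed.  foo_dcbx_renegotiated() is hypothetical; the seq and portid
 * arguments are zero because the notification is unsolicited.
 *
 *	static void foo_dcbx_renegotiated(struct net_device *dev)
 *	{
 *		dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
 *	}
 */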
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed, the entire message is
 * aborted and an error value is returned. No attempt is made to reconcile
 * the case where only part of the command can be completed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!tb[DCB_ATTR_IEEE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) tb[DCB_ATTR_IEEE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) dcbnl_ieee_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err = ops->ieee_setets(netdev, ets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct ieee_maxrate *maxrate =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) err = ops->ieee_setmaxrate(netdev, maxrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct ieee_qcn *qcn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) nla_data(ieee[DCB_ATTR_IEEE_QCN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) err = ops->ieee_setqcn(netdev, qcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) err = ops->ieee_setpfc(netdev, pfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct dcbnl_buffer *buffer =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) err = ops->dcbnl_setbuffer(netdev, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct dcb_app *app_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (nla_type(attr) != DCB_ATTR_IEEE_APP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (nla_len(attr) < sizeof(struct dcb_app)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) err = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) app_data = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (ops->ieee_setapp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) err = ops->ieee_setapp(netdev, app_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) err = dcb_ieee_setapp(netdev, app_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
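
/*
 * Illustrative sketch (not part of this file): each DCB_ATTR_IEEE_APP entry
 * carries a struct dcb_app.  Kernel code can register the same kind of
 * entry directly through dcb_ieee_setapp(); the FCoE-to-priority-3 mapping
 * below is only an example.
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *		.protocol = ETH_P_FCOE,
 *		.priority = 3,
 *	};
 *	int err = dcb_ieee_setapp(dev, &app);
 */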
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return dcbnl_ieee_fill(skb, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (!tb[DCB_ATTR_IEEE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) tb[DCB_ATTR_IEEE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dcbnl_ieee_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct dcb_app *app_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (nla_type(attr) != DCB_ATTR_IEEE_APP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) app_data = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (ops->ieee_delapp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) err = ops->ieee_delapp(netdev, app_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) err = dcb_ieee_delapp(netdev, app_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* DCBX configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!netdev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return nla_put_u8(skb, DCB_ATTR_DCBX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) netdev->dcbnl_ops->getdcbx(netdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u8 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (!netdev->dcbnl_ops->setdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!tb[DCB_ATTR_DCBX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) value = nla_get_u8(tb[DCB_ATTR_DCBX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return nla_put_u8(skb, DCB_ATTR_DCBX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) netdev->dcbnl_ops->setdcbx(netdev, value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
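
/*
 * Illustrative sketch (not part of this file): getdcbx()/setdcbx() exchange
 * a bitmask of DCB_CAP_DCBX_* flags.  A host-managed, IEEE-only device
 * might implement them roughly as below, returning nonzero from setdcbx()
 * to reject an unsupported mode.  struct foo_priv is hypothetical.
 *
 *	static u8 foo_getdcbx(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return priv->dcbx_cap;
 *	}
 *
 *	static u8 foo_setdcbx(struct net_device *dev, u8 mode)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
 *		    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
 *		    !(mode & DCB_CAP_DCBX_HOST))
 *			return 1;
 *
 *		priv->dcbx_cap = mode;
 *		return 0;
 *	}
 */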
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) u8 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) int getall = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!netdev->dcbnl_ops->getfeatcfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (!tb[DCB_ATTR_FEATCFG])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) tb[DCB_ATTR_FEATCFG],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) dcbnl_featcfg_nest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (data[DCB_FEATCFG_ATTR_ALL])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) getall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (!getall && !data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ret = nla_put_u8(skb, i, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) u8 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!tb[DCB_ATTR_FEATCFG])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) tb[DCB_ATTR_FEATCFG],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) dcbnl_featcfg_nest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (data[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) value = nla_get_u8(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /* Handle CEE DCBX GET commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) u32 seq, struct nlattr **tb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return dcbnl_cee_fill(skb, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) struct reply_func {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /* reply netlink message type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /* function to fill message contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) int (*cb)(struct net_device *, struct nlmsghdr *, u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct nlattr **, struct sk_buff *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct dcbmsg *dcb = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct nlattr *tb[DCB_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) u32 portid = NETLINK_CB(skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct sk_buff *reply_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct nlmsghdr *reply_nlh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) const struct reply_func *fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dcbnl_rtnl_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (dcb->cmd > DCB_CMD_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* check if a reply function has been defined for the command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) fn = &reply_funcs[dcb->cmd];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!fn->cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!tb[DCB_ATTR_IFNAME])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!netdev->dcbnl_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) nlh->nlmsg_flags, &reply_nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (!reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) nlmsg_free(reply_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) nlmsg_end(reply_skb, reply_nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) ret = rtnl_unicast(reply_skb, net, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int ifindex, int prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (itr->app.selector == app->selector &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) itr->app.protocol == app->protocol &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) itr->ifindex == ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) ((prio == -1) || itr->app.priority == prio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static int dcb_app_add(const struct dcb_app *app, int ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct dcb_app_type *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) memcpy(&entry->app, app, sizeof(*app));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) entry->ifindex = ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) list_add(&entry->list, &dcb_app_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * dcb_getapp - retrieve the DCBX application user priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) *
 * On success, returns a non-zero 802.1p user priority bitmap; otherwise
 * returns 0, the invalid user priority bitmap, to indicate an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) u8 prio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) itr = dcb_app_lookup(app, dev->ifindex, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (itr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) prio = itr->app.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) EXPORT_SYMBOL(dcb_getapp);
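/*
 * Illustrative sketch (not part of dcbnl): how a CEE-aware driver might use
 * dcb_getapp() to look up the user priority bitmap currently stored for the
 * FCoE ethertype. The helper name and the choice of ETH_P_FCOE are examples
 * only; the struct dcb_app fields are the ones defined in uapi/linux/dcbnl.h.
 */
static u8 __maybe_unused dcbnl_example_get_fcoe_up(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,	/* CEE: match on ethertype */
		.protocol = ETH_P_FCOE,
	};

	/* Returns a non-zero user priority bitmap, or 0 if no entry exists. */
	return dcb_getapp(dev, &app);
}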
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * dcb_setapp - add CEE dcb application data to app list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) *
 * Priority 0 is an invalid priority in the CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) int dcb_setapp(struct net_device *dev, struct dcb_app *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct dcb_app_type event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) event.ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) memcpy(&event.app, new, sizeof(event.app));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (dev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) event.dcbx = dev->dcbnl_ops->getdcbx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /* Search for existing match and replace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) itr = dcb_app_lookup(new, dev->ifindex, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (itr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (new->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) itr->app.priority = new->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) list_del(&itr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) kfree(itr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
	/* App type does not exist, add new application type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (new->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) err = dcb_app_add(new, dev->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) call_dcbevent_notifiers(DCB_APP_EVENT, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) EXPORT_SYMBOL(dcb_setapp);
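/*
 * Illustrative sketch (not part of dcbnl): setting and clearing a CEE APP
 * entry with dcb_setapp(). Passing priority 0 removes an existing entry, as
 * described above. The wrapper name and the ethertype are examples only.
 */
static int __maybe_unused dcbnl_example_set_fcoe_up(struct net_device *dev,
						    u8 up_mask)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
		.priority = up_mask,	/* 0 deletes any existing entry */
	};

	return dcb_setapp(dev, &app);
}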
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate that the dcb_app was
 * not found in the APP list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) u8 prio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) itr = dcb_app_lookup(app, dev->ifindex, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (itr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) prio |= 1 << itr->app.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) EXPORT_SYMBOL(dcb_ieee_getapp_mask);
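/*
 * Illustrative sketch (not part of dcbnl): under IEEE DCBX the stored value is
 * a 3-bit priority, so dcb_ieee_getapp_mask() converts it to a one-hot bitmap
 * for callers that work with priority masks. Shown here for the well-known
 * iSCSI TCP port; the helper name and the port value are examples only.
 */
static u8 __maybe_unused dcbnl_example_iscsi_prio_mask(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_STREAM,	/* TCP/SCTP port */
		.protocol = 3260,				/* iSCSI */
	};

	return dcb_ieee_getapp_mask(dev, &app);
}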
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * dcb_ieee_setapp - add IEEE dcb application data to app list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) *
 * This adds application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different. Priority is expected to be a
 * 3-bit unsigned integer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct dcb_app_type event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) event.ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) memcpy(&event.app, new, sizeof(event.app));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (dev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) event.dcbx = dev->dcbnl_ops->getdcbx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* Search for existing match and abort if found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) err = dcb_app_add(new, dev->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) call_dcbevent_notifiers(DCB_APP_EVENT, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) EXPORT_SYMBOL(dcb_ieee_setapp);
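/*
 * Illustrative sketch (not part of dcbnl): adding an IEEE APP entry that maps
 * a DSCP value to a priority with dcb_ieee_setapp(). -EEXIST is returned if an
 * identical {selector, protocol, priority} entry is already on the list. The
 * helper name and the DSCP/priority parameters are examples only.
 */
static int __maybe_unused dcbnl_example_map_dscp(struct net_device *dev,
						 u8 dscp, u8 prio)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = dscp,	/* DSCP values are 0..63 */
		.priority = prio,	/* 3-bit priority, 0..7 */
	};

	return dcb_ieee_setapp(dev, &app);
}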
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * dcb_ieee_delapp - delete IEEE dcb application data from list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) *
 * This removes a matching APP entry from the APP list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct dcb_app_type event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) event.ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) memcpy(&event.app, del, sizeof(event.app));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (dev->dcbnl_ops->getdcbx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) event.dcbx = dev->dcbnl_ops->getdcbx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /* Search for existing match and remove it. */
	itr = dcb_app_lookup(del, dev->ifindex, del->priority);
	if (itr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) list_del(&itr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) kfree(itr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) call_dcbevent_notifiers(DCB_APP_EVENT, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) EXPORT_SYMBOL(dcb_ieee_delapp);
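/*
 * Illustrative sketch (not part of dcbnl): because dcb_ieee_setapp() refuses
 * duplicates, re-mapping a protocol to a new priority is typically done by
 * deleting the old entry first. Names are examples only and error handling is
 * reduced to the minimum.
 */
static int __maybe_unused dcbnl_example_remap_dscp(struct net_device *dev,
						   u8 dscp, u8 old_prio,
						   u8 new_prio)
{
	struct dcb_app old = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = dscp,
		.priority = old_prio,
	};
	struct dcb_app new = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = dscp,
		.priority = new_prio,
	};
	int err;

	err = dcb_ieee_delapp(dev, &old);	/* -ENOENT if it was not there */
	if (err && err != -ENOENT)
		return err;

	return dcb_ieee_setapp(dev, &new);
}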
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * priorities to the DSCP values assigned to that priority. Initialize p_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * such that each map element holds a bit mask of DSCP values configured for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * that priority by APP entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct dcb_ieee_app_prio_map *p_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) int ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) memset(p_map->map, 0, sizeof(p_map->map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (itr->ifindex == ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) itr->app.protocol < 64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) prio = itr->app.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) p_map->map[prio] |= 1ULL << itr->app.protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
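/*
 * Illustrative sketch (not part of dcbnl): walking the priority -> DSCP mask
 * map. Each map[prio] is a 64-bit mask in which bit N set means DSCP value N
 * is mapped to that priority. The dump helper name is an example only.
 */
static void __maybe_unused dcbnl_example_dump_prio_dscp(struct net_device *dev)
{
	struct dcb_ieee_app_prio_map prio_map;
	int prio, dscp;

	dcb_ieee_getapp_prio_dscp_mask_map(dev, &prio_map);

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		for (dscp = 0; dscp < 64; dscp++)
			if (prio_map.map[prio] & (1ULL << dscp))
				netdev_dbg(dev, "prio %d <- dscp %d\n",
					   prio, dscp);
}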
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * such that each map element holds a bit mask of priorities configured for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * given DSCP value by APP entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct dcb_ieee_app_dscp_map *p_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) int ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) memset(p_map->map, 0, sizeof(p_map->map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (itr->ifindex == ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) itr->app.protocol < 64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) itr->app.priority < IEEE_8021QAZ_MAX_TCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
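/*
 * Illustrative sketch (not part of dcbnl): the DSCP -> priority mask map can
 * be used to spot DSCP values that APP entries map to more than one priority,
 * i.e. masks with more than one bit set. The function name is an example only.
 */
static bool __maybe_unused dcbnl_example_dscp_has_conflict(struct net_device *dev)
{
	struct dcb_ieee_app_dscp_map dscp_map;
	int dscp;

	dcb_ieee_getapp_dscp_prio_mask_map(dev, &dscp_map);

	for (dscp = 0; dscp < 64; dscp++) {
		u8 mask = dscp_map.map[dscp];

		/* more than one bit set -> conflicting priorities */
		if (mask & (mask - 1))
			return true;
	}

	return false;
}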
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
/**
 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
 * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
 * priorities set by these entries.
 *
 * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
 * type, with valid PID values >= 1536. A special meaning is then assigned to
 * protocol value of 0: "default priority. For use when priority is not
 * otherwise specified".
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) int ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) struct dcb_app_type *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) u8 mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) list_for_each_entry(itr, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (itr->ifindex == ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) itr->app.protocol == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) itr->app.priority < IEEE_8021QAZ_MAX_TCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) mask |= 1 << itr->app.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
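/*
 * Illustrative sketch (not part of dcbnl): drivers that can program only a
 * single port-default priority typically reduce the returned mask to one
 * value, for instance by taking the highest priority that is set. The wrapper
 * name is an example only; fls() is the kernel's "find last set bit" helper.
 */
static u8 __maybe_unused dcbnl_example_default_prio(struct net_device *dev)
{
	u8 mask = dcb_ieee_getapp_default_prio_mask(dev);

	return mask ? fls(mask) - 1 : 0;
}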
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) static void dcbnl_flush_dev(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct dcb_app_type *itr, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) spin_lock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (itr->ifindex == dev->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) list_del(&itr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) kfree(itr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) spin_unlock_bh(&dcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) static int dcbnl_netdevice_event(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (!dev->dcbnl_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dcbnl_flush_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static struct notifier_block dcbnl_nb __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) .notifier_call = dcbnl_netdevice_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static int __init dcbnl_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) INIT_LIST_HEAD(&dcb_app_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) err = register_netdevice_notifier(&dcbnl_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) device_initcall(dcbnl_init);