// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

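/* Per-qdisc private state. @qdiscs holds the pre-allocated per-txq child
 * qdiscs until mqprio_attach() grafts them and frees the array.
 * @hw_offload is the offload level the driver reported back from
 * ndo_setup_tc(). @mode, @shaper, @min_rate and @max_rate mirror the
 * optional netlink attributes; @flags records which of them were supplied.
 */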
struct mqprio_sched {
	struct Qdisc **qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

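/* Tear down the qdisc: put any child qdiscs that were never attached,
 * then either tell the driver to remove its offloaded configuration
 * (DCB/channel modes) or clear the software traffic class mapping.
 */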
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

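/* Validate the user-supplied tc_mqprio_qopt: the number of traffic
 * classes, the priority-to-tc map, and (for the software path) that the
 * per-tc queue ranges fit within real_num_tx_queues without overlapping.
 */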
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support
	 * a given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx queue range; an
		 * offset plus count equal to real_num_tx_queues means the
		 * last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

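/* Parse the netlink attributes that follow the fixed-size tc_mqprio_qopt
 * inside TCA_OPTIONS. If nothing follows the struct, just zero @tb.
 */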
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

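/* Set up the qdisc: validate the request, parse any extended (offload)
 * attributes, pre-allocate one child qdisc per tx queue, and then either
 * hand the queue mapping to the driver via ndo_setup_tc() or program the
 * software traffic class mapping on the device.
 */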
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* Make certain we can allocate enough classids to handle the queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* Pre-allocate the qdiscs so that attachment cannot fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping, then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

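/* Graft the pre-allocated child qdiscs onto their tx queues. This runs
 * after init has succeeded, which is why init allocates everything up
 * front: attach has no way to report failure.
 */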
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

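/* Map a class id (queue index + 1) back to its netdev tx queue. */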
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

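/* Replace the qdisc on a single tx queue. The device is deactivated
 * around the graft so packets are not in flight while the queue's qdisc
 * is being swapped.
 */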
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

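/* Emit the per-tc min/max rates as nested TCA_MQPRIO_MIN_RATE64 and
 * TCA_MQPRIO_MAX_RATE64 attributes, mirroring the format accepted by
 * mqprio_init().
 */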
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

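/* Dump the qdisc configuration and refresh the root statistics by
 * summing the counters of every child qdisc.
 */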
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* mqprio supports lockless child qdiscs. However, statistics
	 * accounting needs to account for all, none, or a mix of locked
	 * and unlocked children. Percpu stats are added to the counters
	 * in-band and locking qdisc totals are added at the end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			__u32 qlen = qdisc_qlen_sum(qdisc);

			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen += qlen;
		} else {
			sch->q.qlen += qdisc->q.qlen;
			sch->bstats.bytes += qdisc->bstats.bytes;
			sch->bstats.packets += qdisc->bstats.packets;
			sch->qstats.backlog += qdisc->qstats.backlog;
			sch->qstats.drops += qdisc->qstats.drops;
			sch->qstats.requeues += qdisc->qstats.requeues;
			sch->qstats.overlimits += qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

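/* Return the qdisc currently attached to the tx queue that class @cl
 * maps to, or NULL if @cl does not name a queue class.
 */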
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

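/* Validate a class id. Queue classes (1 through num_tx_queues) and
 * virtual traffic class ids (TC_H_MIN_PRIORITY onwards) are both
 * accepted; 0 is returned for anything else.
 */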
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

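/* Dump statistics for a class. For a virtual traffic class this sums the
 * counters of every queue belonging to that tc; for a queue class it
 * copies the stats of the attached child qdisc.
 */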
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it will be reacquired before touching
		 * the statistics. This is required because the d->lock we
		 * hold here is the lock on dev_queue->qdisc_sleeping that
		 * is also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			if (qdisc_is_percpu_stats(qdisc)) {
				qlen = qdisc_qlen_sum(qdisc);

				__gnet_stats_copy_basic(NULL, &bstats,
							qdisc->cpu_bstats,
							&qdisc->bstats);
				__gnet_stats_copy_queue(&qstats,
							qdisc->cpu_qstats,
							&qdisc->qstats,
							qlen);
			} else {
				qlen += qdisc->q.qlen;
				bstats.bytes += qdisc->bstats.bytes;
				bstats.packets += qdisc->bstats.packets;
				qstats.backlog += qdisc->qstats.backlog;
				qstats.drops += qdisc->qstats.drops;
				qstats.requeues += qdisc->qstats.requeues;
				qstats.overlimits += qdisc->qstats.overlimits;
			}
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim the root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
					  sch->cpu_bstats, &sch->bstats) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

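/* Walk all classes: first the virtual traffic classes (offset by
 * TC_H_MIN_PRIORITY), then the per-queue classes.
 */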
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");