Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * net/sched/sch_ets.c         Enhanced Transmission Selection scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * -----------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * The Enhanced Transmission Selection scheduler is a classful queuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * discipline that merges functionality of PRIO and DRR qdiscs in one scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * ETS makes it easy to configure a set of strict and bandwidth-sharing bands to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * implement the transmission selection described in 802.1Qaz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Although ETS is technically classful, it's not possible to add and remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * classes at will. Instead one specifies number of classes, how many are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * PRIO-like and how many DRR-like, and quanta for the latter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * Algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * ---------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * The strict classes, if any, are tried for traffic first: first band 0, if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * has no traffic then band 1, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * When there is no traffic in any of the strict queues, the bandwidth-sharing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * ones are tried next. Each band is assigned a deficit counter, initialized to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * "quantum" of that band. ETS maintains a list of active bandwidth-sharing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * bands whose qdiscs are non-empty. A packet is dequeued from the band at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * head of the list if the packet size is smaller or equal to the deficit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * counter. If the counter is too small, it is increased by "quantum" and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * scheduler moves on to the next band in the active list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <net/gen_stats.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <net/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <net/sch_generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
/* Per-band state. Bands [0, nstrict) are strict-priority; the remaining
 * bands are bandwidth-sharing (DRR) bands.
 */
struct ets_class {
	struct list_head alist; /* In struct ets_sched.active. */
	struct Qdisc *qdisc;	/* Child qdisc holding this band's packets. */
	u32 quantum;		/* DRR quantum; meaningful for DRR bands only. */
	u32 deficit;		/* Bytes this band may still send this round. */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/* Qdisc private data: the fixed set of bands plus the round-robin list of
 * currently backlogged bandwidth-sharing bands.
 */
struct ets_sched {
	struct list_head active;	/* Non-empty DRR bands, RR order. */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;		/* Total number of configured bands. */
	unsigned int nstrict;		/* First nstrict bands are strict. */
	u8 prio2band[TC_PRIO_MAX + 1];	/* skb->priority -> band index. */
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
/* Top-level netlink policy for qdisc-level (Qdisc_ops.change) options. */
static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
/* Policy for attributes nested inside TCA_ETS_PRIOMAP. */
static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
/* Policy for attributes nested inside TCA_ETS_QUANTA. */
static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/* Policy for per-class (Qdisc_class_ops.change) options. */
static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 			     unsigned int *quantum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 			     struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	*quantum = nla_get_u32(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	if (!*quantum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) static struct ets_class *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	return &q->classes[arg - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	int band = cl - q->classes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	return TC_H_MAKE(sch->handle, band + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static void ets_offload_change(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	struct tc_ets_qopt_offload qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	unsigned int w_psum_prev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	unsigned int q_psum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	unsigned int q_sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	unsigned int quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	unsigned int w_psum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	unsigned int weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	qopt.command = TC_ETS_REPLACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	qopt.handle = sch->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	qopt.parent = sch->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	qopt.replace_params.bands = q->nbands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	qopt.replace_params.qstats = &sch->qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	memcpy(&qopt.replace_params.priomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	       q->prio2band, sizeof(q->prio2band));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	for (i = 0; i < q->nbands; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		q_sum += q->classes[i].quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	for (i = 0; i < q->nbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		quantum = q->classes[i].quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		q_psum += quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		w_psum = quantum ? q_psum * 100 / q_sum : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		weight = w_psum - w_psum_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		w_psum_prev = w_psum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		qopt.replace_params.quanta[i] = quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		qopt.replace_params.weights[i] = weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static void ets_offload_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	struct tc_ets_qopt_offload qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	qopt.command = TC_ETS_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	qopt.handle = sch->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	qopt.parent = sch->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 			      struct Qdisc *old, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			      struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	struct tc_ets_qopt_offload qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	qopt.command = TC_ETS_GRAFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	qopt.handle = sch->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	qopt.parent = sch->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	qopt.graft_params.band = arg - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	qopt.graft_params.child_handle = new->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 				   &qopt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static int ets_offload_dump(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	struct tc_ets_qopt_offload qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	qopt.command = TC_ETS_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	qopt.handle = sch->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	qopt.parent = sch->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	qopt.stats.bstats = &sch->bstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	qopt.stats.qstats = &sch->qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	unsigned int band = cl - q->classes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	return band < q->nstrict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
/* Qdisc_class_ops.change: update the quantum of an existing band. The set
 * of classes itself is fixed; adding/removing bands happens only via the
 * qdisc-level change operation.
 */
static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	/* Only DRR bands carry a quantum. */
	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	/* Validate (rejects a zero quantum) before taking the tree lock. */
	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	/* Publish the new quantum under the qdisc tree lock so readers see
	 * a consistent value.
	 */
	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	/* Mirror the new configuration to offloading hardware, if any. */
	ets_offload_change(sch);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
/* Qdisc_class_ops.graft: attach @new as the band's child qdisc and return
 * the previous child in *@old. A NULL @new means "revert to the default"
 * child: a fresh pfifo, or noop_qdisc if its allocation fails.
 */
static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	/* Swap the child (purging the old one's queue), then let offload
	 * drivers update their view of the hierarchy.
	 */
	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	struct ets_class *cl = ets_class_from_arg(sch, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	return cl->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	unsigned long band = TC_H_MIN(classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	if (band - 1 >= q->nbands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	return band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	struct ets_class *cl = ets_class_from_arg(sch, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	/* We get notified about zero-length child Qdiscs as well if they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	 * offloaded. Those aren't on the active list though, so don't attempt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	 * to remove them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		list_del(&cl->alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 			  struct sk_buff *skb, struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	struct ets_class *cl = ets_class_from_arg(sch, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	tcm->tcm_parent = TC_H_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	tcm->tcm_handle = ets_class_id(sch, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	tcm->tcm_info = cl->qdisc->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	if (!ets_class_is_strict(q, cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	return nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 				struct gnet_dump *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	struct ets_class *cl = ets_class_from_arg(sch, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	struct Qdisc *cl_q = cl->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 				  d, NULL, &cl_q->bstats) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	    qdisc_qstats_copy(d, cl_q) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	if (arg->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	for (i = 0; i < q->nbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 		if (arg->count < arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 			arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 		if (arg->fn(sch, i + 1, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 			arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 		arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static struct tcf_block *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	if (cl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	return q->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 					u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	return ets_class_find(sch, classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	/* Bands are fixed at configuration time and not refcounted, so
	 * unbinding a filter needs no bookkeeping.
	 */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 
/* Map an skb to its target band. Resolution order:
 *  1. skb->priority naming this qdisc's handle directly,
 *  2. attached TC filters (if any),
 *  3. the prio2band table indexed by skb->priority,
 * falling back to the band of prio2band[0] when a filter result names a
 * band that is out of range. Returns NULL (with *qerr set) when a filter
 * verdict consumed or dropped the packet.
 */
static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			/* Packet was consumed or dropped by an action. */
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			/* No filter, or no match: fall back to the priomap.
			 * A priority carrying a major number is not a plain
			 * priority value, so treat it as priority 0.
			 */
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		/* Filter matched: its classid minor selects the band. */
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
/* Qdisc_ops.enqueue: classify the packet, hand it to the band's child
 * qdisc, and put the band on the active DRR list when it transitions from
 * empty to non-empty.
 */
static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	/* Read the packet length up front; the skb may no longer be ours
	 * to touch after qdisc_enqueue() below.
	 */
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		/* A filter verdict stole or dropped the packet. */
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	/* Record emptiness before enqueueing: 0 -> 1 means a DRR band must
	 * (re)join the active list below.
	 */
	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	/* Strict bands are polled directly at dequeue time and never sit
	 * on the active list; its deficit starts at a full quantum.
	 */
	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	struct ets_class *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	unsigned int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		for (band = 0; band < q->nstrict; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			cl = &q->classes[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 			skb = qdisc_dequeue_peeked(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 			if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 				return ets_qdisc_dequeue_skb(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		if (list_empty(&q->active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		cl = list_first_entry(&q->active, struct ets_class, alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 		skb = cl->qdisc->ops->peek(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 			qdisc_warn_nonwc(__func__, cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		len = qdisc_pkt_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		if (len <= cl->deficit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 			cl->deficit -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 			skb = qdisc_dequeue_peeked(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 			if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 			if (cl->qdisc->q.qlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 				list_del(&cl->alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 			return ets_qdisc_dequeue_skb(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		cl->deficit += cl->quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		list_move_tail(&cl->alist, &q->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 				   unsigned int nbands, u8 *priomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 				   struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	const struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	int prio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	u8 band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 				    ets_priomap_policy, NL_VALIDATE_STRICT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 				    extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	nla_for_each_nested(attr, priomap_attr, rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		switch (nla_type(attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		case TCA_ETS_PRIOMAP_BAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 			if (prio > TC_PRIO_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 			band = nla_get_u8(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 			if (band >= nbands) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 			priomap[prio++] = band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 			WARN_ON_ONCE(1); /* Validate should have caught this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 				  unsigned int nbands, unsigned int nstrict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 				  unsigned int *quanta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 				  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	const struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	int band = nstrict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 				    ets_quanta_policy, NL_VALIDATE_STRICT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 				    extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	nla_for_each_nested(attr, quanta_attr, rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 		switch (nla_type(attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		case TCA_ETS_QUANTA_BAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 			if (band >= nbands) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			err = ets_quantum_parse(sch, attr, &quanta[band++],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 						extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 			WARN_ON_ONCE(1); /* Validate should have caught this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	struct nlattr *tb[TCA_ETS_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	unsigned int oldbands = q->nbands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	u8 priomap[TC_PRIO_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	unsigned int nstrict = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	unsigned int nbands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	if (!opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	if (!tb[TCA_ETS_NBANDS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	/* Unless overridden, traffic goes to the last band. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	memset(priomap, nbands - 1, sizeof(priomap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (tb[TCA_ETS_NSTRICT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		if (nstrict > nbands) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	if (tb[TCA_ETS_PRIOMAP]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 					      nbands, priomap, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	if (tb[TCA_ETS_QUANTA]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 					     nbands, nstrict, quanta, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	/* If there are more bands than strict + quanta provided, the remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	 * ones are ETS with quantum of MTU. Initialize the missing values here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	for (i = nstrict; i < nbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		if (!quanta[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 			quanta[i] = psched_mtu(qdisc_dev(sch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	/* Before commit, make sure we can allocate all new qdiscs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	for (i = oldbands; i < nbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 					      ets_class_id(sch, &q->classes[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 					      extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		if (!queues[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 			while (i > oldbands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 				qdisc_put(queues[--i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	q->nbands = nbands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	for (i = nstrict; i < q->nstrict; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		INIT_LIST_HEAD(&q->classes[i].alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 		if (q->classes[i].qdisc->q.qlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 			list_add_tail(&q->classes[i].alist, &q->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 			q->classes[i].deficit = quanta[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	for (i = q->nbands; i < oldbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 			list_del(&q->classes[i].alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		qdisc_tree_flush_backlog(q->classes[i].qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	q->nstrict = nstrict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	memcpy(q->prio2band, priomap, sizeof(priomap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	for (i = 0; i < q->nbands; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		q->classes[i].quantum = quanta[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	for (i = oldbands; i < q->nbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		q->classes[i].qdisc = queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 		if (q->classes[i].qdisc != &noop_qdisc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 			qdisc_hash_add(q->classes[i].qdisc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	ets_offload_change(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	for (i = q->nbands; i < oldbands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 		qdisc_put(q->classes[i].qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		memset(&q->classes[i], 0, sizeof(q->classes[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 			  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	INIT_LIST_HEAD(&q->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	return ets_qdisc_change(sch, opt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) static void ets_qdisc_reset(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	for (band = q->nstrict; band < q->nbands; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 		if (q->classes[band].qdisc->q.qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 			list_del(&q->classes[band].alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	for (band = 0; band < q->nbands; band++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 		qdisc_reset(q->classes[band].qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	sch->qstats.backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	sch->q.qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static void ets_qdisc_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	ets_offload_destroy(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	tcf_block_put(q->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	for (band = 0; band < q->nbands; band++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		qdisc_put(q->classes[band].qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	struct ets_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	struct nlattr *opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	err = ets_offload_dump(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	if (!opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 		goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 		goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	if (q->nstrict &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	if (q->nbands > q->nstrict) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 			goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		for (band = q->nstrict; band < q->nbands; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 					q->classes[band].quantum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 				goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 			goto nla_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	return nla_nest_end(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) nla_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	nla_nest_cancel(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static const struct Qdisc_class_ops ets_class_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	.change		= ets_class_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	.graft		= ets_class_graft,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	.leaf		= ets_class_leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	.find		= ets_class_find,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	.qlen_notify	= ets_class_qlen_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	.dump		= ets_class_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	.dump_stats	= ets_class_dump_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	.walk		= ets_qdisc_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	.tcf_block	= ets_qdisc_tcf_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	.bind_tcf	= ets_qdisc_bind_tcf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	.unbind_tcf	= ets_qdisc_unbind_tcf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	.cl_ops		= &ets_class_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	.id		= "ets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	.priv_size	= sizeof(struct ets_sched),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	.enqueue	= ets_qdisc_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	.dequeue	= ets_qdisc_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	.peek		= qdisc_peek_dequeued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	.change		= ets_qdisc_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	.init		= ets_qdisc_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	.reset		= ets_qdisc_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	.destroy	= ets_qdisc_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	.dump		= ets_qdisc_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static int __init ets_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	return register_qdisc(&ets_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void __exit ets_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	unregister_qdisc(&ets_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) module_init(ets_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) module_exit(ets_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) MODULE_LICENSE("GPL");