Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * net/sched/sch_drr.c         Deficit Round Robin scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <net/sch_generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <net/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
/* Per-class state for the DRR scheduler. */
struct drr_class {
	struct Qdisc_class_common	common;		/* classid + hash linkage */
	unsigned int			filter_cnt;	/* # of tc filters bound to this class */

	struct gnet_stats_basic_packed		bstats;	/* bytes/packets dequeued */
	struct gnet_stats_queue		qstats;		/* drop counters */
	struct net_rate_estimator __rcu *rate_est;	/* optional rate estimator */
	struct list_head		alist;		/* entry on drr_sched->active */
	struct Qdisc			*qdisc;		/* attached leaf qdisc */

	u32				quantum;	/* bytes credited per round */
	u32				deficit;	/* remaining byte credit this round */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
/* Qdisc-private state: classes with backlog are kept on 'active' and
 * served round-robin by drr_dequeue().
 */
struct drr_sched {
	struct list_head		active;		/* backlogged classes, RR order */
	struct tcf_proto __rcu		*filter_list;	/* classification filters */
	struct tcf_block		*block;		/* shared filter block */
	struct Qdisc_class_hash		clhash;		/* classid -> drr_class hash */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	struct Qdisc_class_common *clc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	clc = qdisc_class_find(&q->clhash, classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	if (clc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	return container_of(clc, struct drr_class, common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
/* Netlink attribute policy: a DRR class takes a single u32 quantum. */
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 			    struct nlattr **tca, unsigned long *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	struct drr_class *cl = (struct drr_class *)*arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	struct nlattr *opt = tca[TCA_OPTIONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	struct nlattr *tb[TCA_DRR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	u32 quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	if (!opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 					  extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (tb[TCA_DRR_QUANTUM]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		if (quantum == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		quantum = psched_mtu(qdisc_dev(sch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	if (cl != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		if (tca[TCA_RATE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 			err = gen_replace_estimator(&cl->bstats, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 						    &cl->rate_est,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 						    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 						    qdisc_root_sleeping_running(sch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 						    tca[TCA_RATE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 		if (tb[TCA_DRR_QUANTUM])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 			cl->quantum = quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (cl == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	cl->common.classid = classid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	cl->quantum	   = quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 					       &pfifo_qdisc_ops, classid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 					       NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	if (cl->qdisc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		cl->qdisc = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		qdisc_hash_add(cl->qdisc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	if (tca[TCA_RATE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 					    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 					    qdisc_root_sleeping_running(sch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 					    tca[TCA_RATE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 			qdisc_put(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 			kfree(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	qdisc_class_hash_insert(&q->clhash, &cl->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	qdisc_class_hash_grow(sch, &q->clhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	*arg = (unsigned long)cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
/* Release all resources owned by a class: its rate estimator, its leaf
 * qdisc reference, and the class itself.
 */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
/* Delete a class unless tc filters still reference it (-EBUSY).
 * The queue is purged and the class unhashed under the tree lock;
 * the final teardown happens outside the lock.
 */
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	return (unsigned long)drr_find_class(sch, classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 				       struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	if (cl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	return q->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 				  u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	struct drr_class *cl = drr_find_class(sch, classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	if (cl != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		cl->filter_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	return (unsigned long)cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
/* ->unbind_tcf() hook: drop the filter reference taken by drr_bind_tcf(). */
static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 			   struct Qdisc *new, struct Qdisc **old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			   struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	struct drr_class *cl = (struct drr_class *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	if (new == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 					cl->common.classid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 			new = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	*old = qdisc_replace(sch, new, &cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	struct drr_class *cl = (struct drr_class *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	return cl->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	struct drr_class *cl = (struct drr_class *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	list_del(&cl->alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 			  struct sk_buff *skb, struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	struct drr_class *cl = (struct drr_class *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	tcm->tcm_parent	= TC_H_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	tcm->tcm_handle	= cl->common.classid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	tcm->tcm_info	= cl->qdisc->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	return nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
/* ->dump_stats() hook: emit basic, rate-estimator and queue statistics
 * for one class, plus the DRR-specific xstats (current deficit).
 */
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	/* The deficit is only meaningful while the class is backlogged. */
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	struct drr_class *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	if (arg->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	for (i = 0; i < q->clhash.hashsize; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 			if (arg->count < arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 				arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 				arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 			arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
/* Map an skb to a DRR class.
 *
 * Fast path: if skb->priority carries this qdisc's major handle, treat it
 * as a direct classid.  Otherwise run the tc filter chain (under RCU-bh)
 * and translate the result.  Returns NULL when no class matches or when
 * an action consumed/dropped the packet; *qerr is set accordingly.
 */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			/* Packet was taken by an action: success to the
			 * caller, but nothing to enqueue here.
			 */
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
/* ->enqueue() hook: classify the packet and enqueue it on the class's
 * leaf qdisc.  A class that transitions from empty to backlogged is
 * appended to the active list with a fresh deficit of one quantum.
 */
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		/* Count a drop only for true classification misses, not for
		 * packets stolen by a tc action.
		 */
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static struct sk_buff *drr_dequeue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	struct drr_class *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	if (list_empty(&q->active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 		cl = list_first_entry(&q->active, struct drr_class, alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		skb = cl->qdisc->ops->peek(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 		if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 			qdisc_warn_nonwc(__func__, cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		len = qdisc_pkt_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		if (len <= cl->deficit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			cl->deficit -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			skb = qdisc_dequeue_peeked(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 			if (unlikely(skb == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 			if (cl->qdisc->q.qlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 				list_del(&cl->alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 			bstats_update(&cl->bstats, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 			qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 			qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 			sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 			return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 		cl->deficit += cl->quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 		list_move_tail(&cl->alist, &q->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
/* ->init() hook: acquire the tc filter block, set up the class hash and
 * the (initially empty) active list.
 *
 * NOTE(review): on a failed qdisc_class_hash_init() the filter block is
 * not released here — presumably the core invokes ->destroy() on init
 * failure, which does tcf_block_put(); verify against qdisc_create().
 */
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static void drr_reset_qdisc(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	struct drr_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	struct drr_class *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	for (i = 0; i < q->clhash.hashsize; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 			if (cl->qdisc->q.qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 				list_del(&cl->alist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 			qdisc_reset(cl->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	sch->qstats.backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	sch->q.qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
/* ->destroy() hook: release the filter block first (stops new filter
 * references), then tear down every class and the hash table itself.
 */
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		/* _safe variant: drr_destroy_class() frees the entry. */
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
/* Class-level operations exposed to the tc core. */
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 
/* Qdisc registration descriptor for "drr". */
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
/* Module entry: register the "drr" qdisc with the tc core. */
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

/* Module exit: unregister the qdisc. */
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");