Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/sched/sch_red.c (blame: all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */
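
/*
 * Illustrative userspace configuration (commentary added in this tree,
 * not in upstream sch_red.c); parameter names follow iproute2's tc-red(8):
 *
 *	tc qdisc add dev eth0 root red \
 *		limit 400000 min 30000 max 90000 avpkt 1000 \
 *		burst 55 probability 0.02 bandwidth 10mbit ecn adaptive
 *
 * tc derives Wlog/Plog/Scell_log and the stab lookup table from these
 * values and passes them in via TCA_RED_PARMS/TCA_RED_STAB below.
 */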

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

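/*
 * Flag semantics (commentary added in this tree): TC_RED_ECN marks
 * ECN-capable packets instead of dropping them; TC_RED_HARDDROP drops
 * even ECN-capable packets once qavg exceeds qth_max; TC_RED_NODROP
 * queues non-ECT packets in ECN mode instead of early-dropping them.
 */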
#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

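/*
 * Enqueue path (commentary added in this tree): red_calc_qavg()
 * maintains an EWMA of the child backlog with weight W = 2^-Wlog;
 * qavg is kept scaled by 2^Wlog, so the non-idle update is effectively
 *
 *	qavg += backlog - (qavg >> Wlog)
 *
 * red_action() then compares the average against the (pre-scaled)
 * thresholds: below qth_min -> RED_DONT_MARK, between qth_min and
 * qth_max -> RED_PROB_MARK with probability approaching max_P, and
 * at or above qth_max -> RED_HARD_MARK.
 */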
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

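/*
 * (Commentary added in this tree.) When the child queue goes empty,
 * red_start_of_idle_period() timestamps the queue; on the next enqueue
 * red_calc_qavg() decays qavg for the elapsed idle time using the
 * Scell_log/stab lookup table, the behaviour fixed in the 990814
 * change credited in the header.
 */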
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

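/*
 * Hardware offload (commentary added in this tree): parameters are
 * pushed to the driver through ndo_setup_tc(TC_SETUP_QDISC_RED).
 * qth_min/qth_max are stored pre-scaled by Wlog (see red_set_parms()),
 * hence the >> Wlog below. TC_RED_REPLACE (re)programs the hardware
 * queue; TC_RED_DESTROY tears it down.
 */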
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

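/*
 * (Commentary added in this tree.) __red_change() is shared by init
 * and change: it validates TCA_RED_PARMS/TCA_RED_STAB, optionally
 * builds a new bfifo child sized to ctl->limit, then swaps parameters
 * in under the qdisc tree lock. Worked scaling example: with
 * qth_min = 30000 bytes and Wlog = 9, red_set_parms() stores
 * 30000 << 9 = 15360000, and red_offload()/red_dump() shift it back.
 */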
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

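/*
 * Adaptive RED (commentary added in this tree): when the historic
 * TC_RED_ADAPTATIVE flag (upstream's spelling) is set, this timer
 * fires every HZ/2 and red_adaptative_algo() nudges max_P up or down
 * so that qavg settles between qth_min and qth_max, in the spirit of
 * Floyd et al.'s "Adaptive RED".
 */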
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

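/*
 * Class interface (commentary added in this tree): RED exposes exactly
 * one pseudo-class, minor 1, which holds the child qdisc; red_find()
 * therefore always returns 1 and red_walk() visits a single class.
 */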
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");