Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

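File: net/sched/sch_cbs.c
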
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_cbs.c	Credit Based Shaper
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

/* Credit Based Shaper (CBS)
 * =========================
 *
 * This is a simple rate-limiting shaper aimed at TSN applications on
 * systems with known traffic workloads.
 *
 * Its algorithm is defined by the IEEE 802.1Q-2014 Specification,
 * Section 8.6.8.2, and explained in more detail in the Annex L of the
 * same specification.
 *
 * There are four tunables to be considered:
 *
 *	'idleslope': Idleslope is the rate of credits that is
 *	accumulated (in kilobits per second) when there is at least
 *	one packet waiting for transmission. Packets are transmitted
 *	when the current value of credits is equal or greater than
 *	zero. When there is no packet to be transmitted the amount of
 *	credits is set to zero. This is the main tunable of the CBS
 *	algorithm.
 *
 *	'sendslope':
 *	Sendslope is the rate of credits that is depleted (it should be a
 *	negative number of kilobits per second) when a transmission is
 *	occurring. It can be calculated as follows (IEEE 802.1Q-2014 Section
 *	8.6.8.2 item g):
 *
 *	sendslope = idleslope - port_transmit_rate
 *
 *	'hicredit': Hicredit defines the maximum amount of credits (in
 *	bytes) that can be accumulated. Hicredit depends on the
 *	characteristics of interfering traffic,
 *	'max_interference_size' is the maximum size of any burst of
 *	traffic that can delay the transmission of a frame that is
 *	available for transmission for this traffic class, (IEEE
 *	802.1Q-2014 Annex L, Equation L-3):
 *
 *	hicredit = max_interference_size * (idleslope / port_transmit_rate)
 *
 *	'locredit': Locredit is the minimum amount of credits that can
 *	be reached. It is a function of the traffic flowing through
 *	this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2):
 *
 *	locredit = max_frame_size * (sendslope / port_transmit_rate)
 */
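
/* Illustrative example (not part of the original source): on a hypothetical
 * 100 Mbit/s port where this traffic class is allotted 20 Mbit/s and the
 * largest (interfering or own) frame is 1500 bytes:
 *
 *	idleslope = 20000 kbit/s
 *	sendslope = 20000 - 100000         = -80000 kbit/s
 *	hicredit  = 1500 * (20000/100000)  =    300 bytes
 *	locredit  = 1500 * (-80000/100000) =  -1200 bytes
 *
 * With the iproute2 'tc' tool this would be configured roughly as follows
 * (device and parent handle are placeholders):
 *
 *	tc qdisc replace dev eth0 parent <handle> cbs \
 *		idleslope 20000 sendslope -80000 hicredit 300 locredit -1200
 */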

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

static LIST_HEAD(cbs_list);
static DEFINE_SPINLOCK(cbs_list_lock);

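/* Netlink carries idleslope/sendslope in kilobits per second; internally the
 * shaper works in bytes per second and nanoseconds, so the rates are scaled
 * by BYTES_PER_KBIT (1000 bits / 8 = 125 bytes per kilobit) in cbs_change().
 */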
#define BYTES_PER_KBIT (1000LL / 8)

struct cbs_sched_data {
	bool offload;
	int queue;
	atomic64_t port_rate; /* in bytes/s */
	s64 last; /* timestamp in ns */
	s64 credits; /* in bytes */
	s32 locredit; /* in bytes */
	s32 hicredit; /* in bytes */
	s64 sendslope; /* in bytes/s */
	s64 idleslope; /* in bytes/s */
	struct qdisc_watchdog watchdog;
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct Qdisc *qdisc;
	struct list_head cbs_list;
};

static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct Qdisc *child,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	int err;

	err = child->ops->enqueue(skb, child, to_free);
	if (err != NET_XMIT_SUCCESS)
		return err;

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	if (sch->q.qlen == 0 && q->credits > 0) {
		/* We need to stop accumulating credits when there are
		 * no packets enqueued and q->credits is positive.
		 */
		q->credits = 0;
		q->last = ktime_get_ns();
	}

	return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->enqueue(skb, sch, to_free);
}

/* timediff is in ns, slope is in bytes/s */
static s64 timediff_to_credits(s64 timediff, s64 slope)
{
	return div64_s64(timediff * slope, NSEC_PER_SEC);
}

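/* How long (in ns) credits that are currently negative take to climb back to
 * zero when replenished at 'slope' (bytes/s). Returns S64_MAX when the slope
 * is zero to avoid a division by zero.
 */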
static s64 delay_from_credits(s64 credits, s64 slope)
{
	if (unlikely(slope == 0))
		return S64_MAX;

	return div64_s64(-credits * NSEC_PER_SEC, slope);
}

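/* Credit change (in bytes) caused by sending 'len' bytes at 'slope', scaled
 * by the port transmit rate: len * slope / port_rate. Called with the
 * (negative) sendslope, so the result is the credit consumed by one packet.
 */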
static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate)
{
	if (unlikely(port_rate == 0))
		return S64_MAX;

	return div64_s64(len * slope, port_rate);
}

static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child)
{
	struct sk_buff *skb;

	skb = child->ops->dequeue(child);
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

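/* Software (non-offloaded) dequeue path, following IEEE 802.1Q-2014 Section
 * 8.6.8.2: wait until the previous frame has left the wire, replenish
 * credits at idleslope while they are negative (arming the watchdog if the
 * queue still cannot send), then charge the dequeued frame against sendslope
 * and record an estimate of when its last byte leaves the port.
 */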
static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;
	s64 now = ktime_get_ns();
	struct sk_buff *skb;
	s64 credits;
	int len;

	/* The previous packet is still being sent */
	if (now < q->last) {
		qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
		return NULL;
	}
	if (q->credits < 0) {
		credits = timediff_to_credits(now - q->last, q->idleslope);

		credits = q->credits + credits;
		q->credits = min_t(s64, credits, q->hicredit);

		if (q->credits < 0) {
			s64 delay;

			delay = delay_from_credits(q->credits, q->idleslope);
			qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);

			q->last = now;

			return NULL;
		}
	}
	skb = cbs_child_dequeue(sch, qdisc);
	if (!skb)
		return NULL;

	len = qdisc_pkt_len(skb);

	/* As sendslope is a negative number, this will decrease the
	 * amount of q->credits.
	 */
	credits = credits_from_len(len, q->sendslope,
				   atomic64_read(&q->port_rate));
	credits += q->credits;

	q->credits = max_t(s64, credits, q->locredit);
	/* Estimate of the transmission of the last byte of the packet in ns */
	if (unlikely(atomic64_read(&q->port_rate) == 0))
		q->last = now;
	else
		q->last = now + div64_s64(len * NSEC_PER_SEC,
					  atomic64_read(&q->port_rate));

	return skb;
}

static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	return cbs_child_dequeue(sch, qdisc);
}

static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = {
	[TCA_CBS_PARMS]	= { .len = sizeof(struct tc_cbs_qopt) },
};

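/* Offload handling: when offload is requested, the shaping parameters are
 * handed to the driver via ndo_setup_tc(TC_SETUP_QDISC_CBS) and this qdisc
 * only enqueues/dequeues the child; otherwise the software paths above do
 * the shaping.
 */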
static void cbs_disable_offload(struct net_device *dev,
				struct cbs_sched_data *q)
{
	struct tc_cbs_qopt_offload cbs = { };
	const struct net_device_ops *ops;
	int err;

	if (!q->offload)
		return;

	q->enqueue = cbs_enqueue_soft;
	q->dequeue = cbs_dequeue_soft;

	ops = dev->netdev_ops;
	if (!ops->ndo_setup_tc)
		return;

	cbs.queue = q->queue;
	cbs.enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
	if (err < 0)
		pr_warn("Couldn't disable CBS offload for queue %d\n",
			cbs.queue);
}

static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
			      const struct tc_cbs_qopt *opt,
			      struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_cbs_qopt_offload cbs = { };
	int err;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
		return -EOPNOTSUPP;
	}

	cbs.queue = q->queue;

	cbs.enable = 1;
	cbs.hicredit = opt->hicredit;
	cbs.locredit = opt->locredit;
	cbs.idleslope = opt->idleslope;
	cbs.sendslope = opt->sendslope;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
		return err;
	}

	q->enqueue = cbs_enqueue_offload;
	q->dequeue = cbs_dequeue_offload;

	return 0;
}

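/* Derive the port transmit rate (bytes/s) from the current ethtool link
 * speed; if the speed cannot be read or is unknown, fall back to 10 Mbit/s
 * (SPEED_10).
 */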
static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int port_rate;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	port_rate = speed * 1000 * BYTES_PER_KBIT;

	atomic64_set(&q->port_rate, port_rate);
	netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->port_rate),
		   ecmd.base.speed);
}

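/* Keep port_rate in sync with the link: on NETDEV_UP/NETDEV_CHANGE, refresh
 * the rate of any CBS instance attached to the affected device.
 */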
static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct cbs_sched_data *q;
	struct net_device *qdev;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&cbs_list_lock);
	list_for_each_entry(q, &cbs_list, cbs_list) {
		qdev = qdisc_dev(q->qdisc);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&cbs_list_lock);

	if (found)
		cbs_set_port_rate(dev, q);

	return NOTIFY_DONE;
}

static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct nlattr *tb[TCA_CBS_MAX + 1];
	struct tc_cbs_qopt *qopt;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy,
					  extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CBS_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing CBS parameters which are mandatory");
		return -EINVAL;
	}

	qopt = nla_data(tb[TCA_CBS_PARMS]);

	if (!qopt->offload) {
		cbs_set_port_rate(dev, q);
		cbs_disable_offload(dev, q);
	} else {
		err = cbs_enable_offload(dev, q, qopt, extack);
		if (err < 0)
			return err;
	}

	/* Everything went OK, save the parameters used. */
	q->hicredit = qopt->hicredit;
	q->locredit = qopt->locredit;
	q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
	q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
	q->offload = qopt->offload;

	return 0;
}

static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	if (!opt) {
		NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
		return -EINVAL;
	}

	q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     sch->handle, extack);
	if (!q->qdisc)
		return -ENOMEM;

	spin_lock(&cbs_list_lock);
	list_add(&q->cbs_list, &cbs_list);
	spin_unlock(&cbs_list_lock);

	qdisc_hash_add(q->qdisc, false);

	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

	q->enqueue = cbs_enqueue_soft;
	q->dequeue = cbs_dequeue_soft;

	qdisc_watchdog_init(&q->watchdog, sch);

	return cbs_change(sch, opt, extack);
}

static void cbs_destroy(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	/* Nothing to do if we couldn't create the underlying qdisc */
	if (!q->qdisc)
		return;

	qdisc_watchdog_cancel(&q->watchdog);
	cbs_disable_offload(dev, q);

	spin_lock(&cbs_list_lock);
	list_del(&q->cbs_list);
	spin_unlock(&cbs_list_lock);

	qdisc_put(q->qdisc);
}

static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct tc_cbs_qopt opt = { };
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	opt.hicredit = q->hicredit;
	opt.locredit = q->locredit;
	opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
	opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
	opt.offload = q->offload;

	if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

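/* CBS exposes a single pseudo-class (minor 1); its leaf is the child qdisc
 * created in cbs_init() or grafted in via cbs_graft().
 */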
static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (!new)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		}
		walker->count++;
	}
}

static const struct Qdisc_class_ops cbs_class_ops = {
	.graft		=	cbs_graft,
	.leaf		=	cbs_leaf,
	.find		=	cbs_find,
	.walk		=	cbs_walk,
	.dump		=	cbs_dump_class,
};

static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
	.id		=	"cbs",
	.cl_ops		=	&cbs_class_ops,
	.priv_size	=	sizeof(struct cbs_sched_data),
	.enqueue	=	cbs_enqueue,
	.dequeue	=	cbs_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbs_init,
	.reset		=	qdisc_reset_queue,
	.destroy	=	cbs_destroy,
	.change		=	cbs_change,
	.dump		=	cbs_dump,
	.owner		=	THIS_MODULE,
};

static struct notifier_block cbs_device_notifier = {
	.notifier_call = cbs_dev_notifier,
};

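/* Module init/exit: the link-change notifier is registered before the qdisc
 * ops; if qdisc registration fails, the notifier is unregistered again.
 */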
static int __init cbs_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&cbs_device_notifier);
	if (err)
		return err;

	err = register_qdisc(&cbs_qdisc_ops);
	if (err)
		unregister_netdevice_notifier(&cbs_device_notifier);

	return err;
}

static void __exit cbs_module_exit(void)
{
	unregister_qdisc(&cbs_qdisc_ops);
	unregister_netdevice_notifier(&cbs_device_notifier);
}
module_init(cbs_module_init)
module_exit(cbs_module_exit)
MODULE_LICENSE("GPL");