Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * net/sched/sch_sfb.c	  Stochastic Fair Blue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * A New Class of Active Queue Management Algorithms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * U. Michigan CSE-TR-387-99, April 1999.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/siphash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <net/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <net/inet_ecn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * This implementation uses L = 8 and N = 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * This permits us to split one 32bit hash (provided per packet by rxhash or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * external classifier) into 8 subhashes of 4 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
/* Probabilities are Q0.16 fixed point; 0xFFFF is ~1.0 (65535/65536).
 * Used throughout (prob_plus, sfb_enqueue, defaults) but was missing
 * from this copy of the file — upstream defines it here.
 */
#define SFB_MAX_PROB 0xFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue (packets currently hashed here) */
	u16		p_mark; /* marking probability, Q0.16 fixed point */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key (re-seeded on rehash) */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; /* L x N virtual queues */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/* Per-qdisc private state: configuration, the two hash slots used for
 * "moving hash functions" double buffering, and drop/mark statistics.
 */
struct sfb_sched_data {
	struct Qdisc	*qdisc;		/* child (inner) qdisc */
	struct tcf_proto __rcu *filter_list; /* optional external classifiers */
	struct tcf_block *block;
	unsigned long	rehash_interval; /* jiffies between slot swaps (0 = never) */
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;		/* bucketdrop when min bin qlen reaches this */
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 : p_mark step up when a bin is full */
	u32		decrement;	/* d2 : p_mark step down when a bin is empty */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;	/* pkts/sec allowed for inelastic flows */
	u32		penalty_burst;	/* token-bucket burst for inelastic flows */
	u32		tokens_avail;	/* current penalty tokens */
	unsigned long	rehash_time;	/* jiffies of last slot swap */
	unsigned long	token_time;	/* jiffies of last token refill */

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering; /* true while standby slot is warming up */
	struct sfb_bins bins[2];

	/* Monotonic counters; each drop path below bumps exactly one. */
	struct {
		u32	earlydrop;	/* probabilistic (Blue) drops */
		u32	penaltydrop;	/* inelastic flow exceeded penalty rate */
		u32	bucketdrop;	/* every bin for the flow was over max */
		u32	queuedrop;	/* hard limit reached */
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];	/* per-slot flow hash; 0 = slot not used for this skb */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * If using 'internal' SFB flow classifier, hash comes from skb rxhash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * If using external classifier, hash comes from the classid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	return sfb_skb_cb(skb)->hashes[slot];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /* Probabilities are coded as Q0.16 fixed-point values,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * with 0xFFFF representing 65535/65536 (almost 1.0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  * Addition and subtraction are saturating in [0, 65535]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static u32 prob_plus(u32 p1, u32 p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	u32 res = p1 + p2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	return min_t(u32, res, SFB_MAX_PROB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static u32 prob_minus(u32 p1, u32 p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	return p1 > p2 ? p1 - p2 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	struct sfb_bucket *b = &q->bins[slot].bins[0][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	for (i = 0; i < SFB_LEVELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		u32 hash = sfbhash & SFB_BUCKET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		sfbhash >>= SFB_BUCKET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		if (b[hash].qlen < 0xFFFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 			b[hash].qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		b += SFB_NUMBUCKETS; /* next level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	u32 sfbhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	sfbhash = sfb_hash(skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	if (sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		increment_one_qlen(sfbhash, 0, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	sfbhash = sfb_hash(skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		increment_one_qlen(sfbhash, 1, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static void decrement_one_qlen(u32 sfbhash, u32 slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 			       struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	struct sfb_bucket *b = &q->bins[slot].bins[0][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	for (i = 0; i < SFB_LEVELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		u32 hash = sfbhash & SFB_BUCKET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		sfbhash >>= SFB_BUCKET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		if (b[hash].qlen > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			b[hash].qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		b += SFB_NUMBUCKETS; /* next level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	u32 sfbhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	sfbhash = sfb_hash(skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	if (sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		decrement_one_qlen(sfbhash, 0, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	sfbhash = sfb_hash(skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	if (sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 		decrement_one_qlen(sfbhash, 1, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
/* Lower the bin's marking probability by d2, saturating at 0
 * (called when the bin's virtual queue is empty).
 */
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
/* Raise the bin's marking probability by d1, saturating at SFB_MAX_PROB
 * (called when the bin's virtual queue reaches bin_size).
 */
static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
/* Clear both slots entirely — every bucket's qlen/p_mark AND the siphash
 * keys (q->bins embeds the keys); callers re-seed via sfb_init_perturbation().
 */
static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  * compute max qlen, max p_mark, and avg p_mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	u32 qlen = 0, prob = 0, totalpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		if (qlen < b->qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			qlen = b->qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		totalpm += b->p_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		if (prob < b->p_mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 			prob = b->p_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 		b++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	*prob_r = prob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	return qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
/* (Re)seed one slot's siphash key with fresh random bytes, so flows map
 * to different bins after a rehash.
 */
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
/* Promote the standby slot (warmed up during double buffering) to active.
 * The retiring slot is re-keyed first so it is ready to serve as the next
 * standby; double buffering is switched off until the next warmup window.
 */
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* Non elastic flows are allowed to use part of the bandwidth, expressed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  * in "penalty_rate" packets per second, with "penalty_burst" burst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	if (q->penalty_rate == 0 || q->penalty_burst == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	if (q->tokens_avail < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		unsigned long age = min(10UL * HZ, jiffies - q->token_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		q->tokens_avail = (age * q->penalty_rate) / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		if (q->tokens_avail > q->penalty_burst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 			q->tokens_avail = q->penalty_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		q->token_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		if (q->tokens_avail < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	q->tokens_avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
/* Run the external classifier chain over the skb.
 *
 * On a match, stores the minor classid in *salt and returns true.
 * Returns false when nothing matched or (with CONFIG_NET_CLS_ACT) an
 * action consumed the packet; for stolen/queued/trap *qerr is set so the
 * caller reports success-with-stolen instead of a drop.
 */
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		       struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	struct Qdisc *child = q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	struct tcf_proto *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	u32 p_min = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	u32 minqlen = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	u32 r, sfbhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	u32 slot = q->slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	if (unlikely(sch->q.qlen >= q->limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		qdisc_qstats_overlimit(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 		q->stats.queuedrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	if (q->rehash_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 		unsigned long limit = q->rehash_time + q->rehash_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		if (unlikely(time_after(jiffies, limit))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 			sfb_swap_slot(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 			q->rehash_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 				    time_after(jiffies, limit - q->warmup_time))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 			q->double_buffering = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	fl = rcu_dereference_bh(q->filter_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	if (fl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 		u32 salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 		/* If using external classifiers, get result and record it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		if (!sfb_classify(skb, fl, &ret, &salt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 			goto other_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	if (!sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		sfbhash = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	sfb_skb_cb(skb)->hashes[slot] = sfbhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	for (i = 0; i < SFB_LEVELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		u32 hash = sfbhash & SFB_BUCKET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 		sfbhash >>= SFB_BUCKET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 		if (b->qlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 			decrement_prob(b, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		else if (b->qlen >= q->bin_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 			increment_prob(b, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		if (minqlen > b->qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			minqlen = b->qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 		if (p_min > b->p_mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 			p_min = b->p_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	slot ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	sfb_skb_cb(skb)->hashes[slot] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	if (unlikely(minqlen >= q->max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		qdisc_qstats_overlimit(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		q->stats.bucketdrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	if (unlikely(p_min >= SFB_MAX_PROB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 		/* Inelastic flow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 		if (q->double_buffering) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 			sfbhash = skb_get_hash_perturb(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 			    &q->bins[slot].perturbation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 			if (!sfbhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 				sfbhash = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 			for (i = 0; i < SFB_LEVELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 				u32 hash = sfbhash & SFB_BUCKET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 				sfbhash >>= SFB_BUCKET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 				if (b->qlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 					decrement_prob(b, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 				else if (b->qlen >= q->bin_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 					increment_prob(b, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 		if (sfb_rate_limit(skb, q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 			qdisc_qstats_overlimit(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 			q->stats.penaltydrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		goto enqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	r = prandom_u32() & SFB_MAX_PROB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	if (unlikely(r < p_min)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 			/* If we're marking that many packets, then either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 			 * this flow is unresponsive, or we're badly congested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 			 * In either case, we want to start dropping packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 				q->stats.earlydrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 				goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		if (INET_ECN_set_ce(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 			q->stats.marked++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			q->stats.earlydrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) enqueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	ret = qdisc_enqueue(skb, child, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	if (likely(ret == NET_XMIT_SUCCESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		qdisc_qstats_backlog_inc(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 		sch->q.qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		increment_qlen(skb, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	} else if (net_xmit_drop_count(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		q->stats.childdrop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 		qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	qdisc_drop(skb, sch, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	return NET_XMIT_CN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) other_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	if (ret & __NET_XMIT_BYPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	struct Qdisc *child = q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	skb = child->dequeue(q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 		sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 		decrement_qlen(skb, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static struct sk_buff *sfb_peek(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	struct Qdisc *child = q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	return child->ops->peek(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static void sfb_reset(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	qdisc_reset(q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	sch->qstats.backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	sch->q.qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	q->slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	q->double_buffering = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	sfb_zero_all_buckets(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	sfb_init_perturbation(0, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) static void sfb_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	tcf_block_put(q->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	qdisc_put(q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static const struct tc_sfb_qopt sfb_default_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	.rehash_interval = 600 * MSEC_PER_SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	.warmup_time = 60 * MSEC_PER_SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	.limit = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	.max = 25,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	.bin_size = 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	.decrement = (SFB_MAX_PROB + 3000) / 6000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	.penalty_rate = 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	.penalty_burst = 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		      struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	struct Qdisc *child, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	struct nlattr *tb[TCA_SFB_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	u32 limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	if (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 						  sfb_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		if (tb[TCA_SFB_PARMS] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		ctl = nla_data(tb[TCA_SFB_PARMS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	limit = ctl->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	if (limit == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		limit = qdisc_dev(sch)->tx_queue_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	if (IS_ERR(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		return PTR_ERR(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	if (child != &noop_qdisc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		qdisc_hash_add(child, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	qdisc_purge_queue(q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	old = q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	q->qdisc = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	q->rehash_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	q->limit = limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	q->increment = ctl->increment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	q->decrement = ctl->decrement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	q->max = ctl->max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	q->bin_size = ctl->bin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	q->penalty_rate = ctl->penalty_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	q->penalty_burst = ctl->penalty_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	q->tokens_avail = ctl->penalty_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	q->token_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	q->slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	q->double_buffering = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	sfb_zero_all_buckets(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	sfb_init_perturbation(0, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	sfb_init_perturbation(1, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	qdisc_put(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	q->qdisc = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	return sfb_change(sch, opt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	struct nlattr *opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	struct tc_sfb_qopt opt = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		.warmup_time = jiffies_to_msecs(q->warmup_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		.limit = q->limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		.max = q->max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		.bin_size = q->bin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		.increment = q->increment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		.decrement = q->decrement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		.penalty_rate = q->penalty_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		.penalty_burst = q->penalty_burst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	sch->qstats.backlog = q->qdisc->qstats.backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	if (opts == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	return nla_nest_end(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	nla_nest_cancel(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	struct tc_sfb_xstats st = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 		.earlydrop = q->stats.earlydrop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		.penaltydrop = q->stats.penaltydrop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		.bucketdrop = q->stats.bucketdrop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		.queuedrop = q->stats.queuedrop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		.childdrop = q->stats.childdrop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 		.marked = q->stats.marked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	return gnet_stats_copy_app(d, &st, sizeof(st));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 			  struct sk_buff *skb, struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		     struct Qdisc **old, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		new = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	*old = qdisc_replace(sch, new, &q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	return q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 			    struct nlattr **tca, unsigned long *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static int sfb_delete(struct Qdisc *sch, unsigned long cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	if (!walker->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		if (walker->count >= walker->skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 			if (walker->fn(sch, 1, walker) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 				walker->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 		walker->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 				       struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	struct sfb_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	if (cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	return q->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 			      u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) static const struct Qdisc_class_ops sfb_class_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	.graft		=	sfb_graft,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	.leaf		=	sfb_leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	.find		=	sfb_find,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	.change		=	sfb_change_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	.delete		=	sfb_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	.walk		=	sfb_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	.tcf_block	=	sfb_tcf_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	.bind_tcf	=	sfb_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	.unbind_tcf	=	sfb_unbind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	.dump		=	sfb_dump_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	.id		=	"sfb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	.priv_size	=	sizeof(struct sfb_sched_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	.cl_ops		=	&sfb_class_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	.enqueue	=	sfb_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	.dequeue	=	sfb_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	.peek		=	sfb_peek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	.init		=	sfb_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	.reset		=	sfb_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	.destroy	=	sfb_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	.change		=	sfb_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	.dump		=	sfb_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	.dump_stats	=	sfb_dump_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	.owner		=	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) static int __init sfb_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	return register_qdisc(&sfb_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) static void __exit sfb_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	unregister_qdisc(&sfb_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) module_init(sfb_module_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) module_exit(sfb_module_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) MODULE_AUTHOR("Juliusz Chroboczek");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) MODULE_AUTHOR("Eric Dumazet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) MODULE_LICENSE("GPL");