// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_hhf.c	Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/* Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * have a higher share of bandwidth.
 *
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet sizes.
 * Therefore,
 *    - For a heavy-hitter flow: *all* of its k array counters must be large.
 *    - For a non-heavy-hitter flow: some of its k array counters can be large
 *      due to hash collision with other small flows; however, with high
 *      probability, not *all* k counters are large.
 *
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increment of the counter values:
 *    - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *      accounted in the array counters. This technique is called "shielding"
 *      in Section 3.3.1 of [EV02].
 *    - Optimization O2: conservative update of counters
 *      (Section 3.3.2 of [EV02]):
 *        New counter value = max {old counter value,
 *                                 smallest counter value + packet bytes}
 *      A worked example follows this comment block.
 *
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 *   - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *     heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
 *     bucket.
 *   - Otherwise, forward p to the multi-stage filter, denoted filter F
 *     + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *       to the non-heavy-hitter bucket.
 *     + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *       then set up a new flow entry for the flow-id of p in the table T and
 *       send p to the heavy-hitter bucket.
 *
 * In this implementation:
 *   - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *     resolved by linked-list chaining.
 *   - F has four counter arrays, each array containing 1024 32-bit counters.
 *     That means 4 * 1024 * 32 bits = 16KB of memory.
 *   - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *     index into each array.
 *     Hence, instead of using four hash functions, we chop the 32-bit
 *     skb-hash into three 10-bit chunks and compute the remaining 10-bit
 *     index as the XOR sum of those three chunks (the two leftover high bits
 *     of the hash are folded into this XOR as well); see the worked example
 *     after this comment block.
 *   - We need to clear the counter arrays periodically; however, directly
 *     memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *     So, by shadowing each counter with a valid bit, we only need to reset
 *     4 * 1024 valid bits (i.e., 512 bytes) instead of 16KB of memory.
 *   - The Deficit Round Robin engine is taken from fq_codel implementation
 *     (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
 *     fq_codel_flow in fq_codel implementation.
 *
 */
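
/* A worked example of the index derivation and of Optimization O2 above
 * (the numbers are arbitrary and purely illustrative):
 *
 * Index derivation: for skb-hash = 0x2468ACE1, the three 10-bit chunks are
 *     c0 = (hash >>  0) & 0x3FF = 0x0E1
 *     c1 = (hash >> 10) & 0x3FF = 0x22B
 *     c2 = (hash >> 20) & 0x3FF = 0x246
 * and the last index is
 *     c3 = c0 ^ c1 ^ c2 ^ (hash >> 30) = 0x08C
 * so this packet is accounted in counters [0][0x0E1], [1][0x22B], [2][0x246]
 * and [3][0x08C] of filter F.
 *
 * Conservative update (O2): if those four counters currently hold
 * {7000, 5000, 9000, 6000} bytes and a 1000-byte packet arrives, then
 * "smallest counter + packet bytes" = 6000, so the counters become
 * {7000, 6000, 9000, 6000}: only counters below the new minimum are raised,
 * which limits the inflation caused by hash collisions.
 */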

/* Non-configurable parameters */
#define HH_FLOWS_CNT	 1024  /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT	 4     /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN	 1024  /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10    /* masking 10 bits */
#define HHF_BIT_MASK	 0x3FF /* bitmask of 10 bits */

#define WDRR_BUCKET_CNT  2     /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
	WDRR_BUCKET_FOR_HH	= 0, /* bucket id for heavy-hitters */
	WDRR_BUCKET_FOR_NON_HH	= 1  /* bucket id for non-heavy-hitters */
};

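/* Timestamps below are in jiffies. This is a wrap-safe "a happened before b"
 * test in the spirit of time_before(): e.g. with a = 0xfffffffe and b = 2
 * (i.e. b has already wrapped past 0), (s32)(a - b) == -4 < 0, so a is still
 * correctly reported as being before b.
 */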
#define hhf_time_before(a, b)	\
	(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))

/* Heavy-hitter per-flow state */
struct hh_flow_state {
	u32		 hash_id;	/* hash of flow-id (e.g. TCP 5-tuple) */
	u32		 hit_timestamp;	/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff	 *head;
	struct sk_buff	 *tail;
	struct list_head bucketchain;
	int		 deficit;
};

struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	siphash_key_t	   perturbation;   /* hash perturbation */
	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;	   /* table T (currently active HHs) */
	u32		   hh_flows_limit;	 /* max active HH allocs */
	u32		   hh_flows_overlimit;	 /* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;	 /* total admitted HHs */
	u32		   hh_flows_current_cnt; /* total current HHs */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
	u32		   hhf_arrays_reset_timestamp;	/* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets; /* list of new buckets */
	struct list_head   old_buckets; /* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout; /* interval to reset counter
					       * arrays in filter F
					       * (default 40ms)
					       */
	u32		   hhf_admit_bytes;   /* counter thresh to classify as
					       * HH (default 128KB).
					       * With these default values,
					       * 128KB / 40ms = 25 Mbps
					       * i.e., we expect to capture HHs
					       * sending > 25 Mbps.
					       */
	u32		   hhf_evict_timeout; /* aging threshold to evict idle
					       * HHs out of table T. This should
					       * be large enough to avoid
					       * reordering during HH eviction.
					       * (default 1s)
					       */
	u32		   hhf_non_hh_weight; /* WDRR weight for non-HHs
					       * (default 2,
					       *  i.e., non-HH : HH = 2 : 1)
					       */
};
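
/* A rough sense of the defaults: with quantum equal to one MTU-sized frame
 * (~1500 bytes) and hhf_non_hh_weight = 2, each WDRR round lets the
 * non-heavy-hitter bucket send up to two quanta for every one quantum of the
 * heavy-hitter bucket, i.e. roughly a 2:1 bandwidth split when both buckets
 * stay backlogged.
 *
 * From user space these parameters are set through the hhf qdisc options of
 * tc(8). As a sketch only (option names assumed from iproute2's tc-hhf(8)):
 *
 *	tc qdisc add dev eth0 root hhf limit 2000 quantum 1514 \
 *		hh_limit 2048 reset_timeout 40ms admit_bytes 128kb \
 *		evict_timeout 1s non_hh_weight 2
 */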

static u32 hhf_time_stamp(void)
{
	return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *flow, *next;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(flow, next, head, flowchain) {
		u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(prev, now)) {
			/* Delete expired heavy-hitters, but preserve one entry
			 * to avoid a kzalloc() the next time this slot is hit.
			 */
			if (list_is_last(&flow->flowchain, head))
				return NULL;
			list_del(&flow->flowchain);
			kfree(flow);
			q->hh_flows_current_cnt--;
		} else if (flow->hash_id == hash) {
			return flow;
		}
	}
	return NULL;
}

/* Returns a flow state entry for a new heavy-hitter. Either reuses an expired
 * entry or dynamically allocates a new one.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *flow;
	u32 now = hhf_time_stamp();

	if (!list_empty(head)) {
		/* Find an expired heavy-hitter flow entry. */
		list_for_each_entry(flow, head, flowchain) {
			u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

			if (hhf_time_before(prev, now))
				return flow;
		}
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		q->hh_flows_overlimit++;
		return NULL;
	}
	/* Create new entry. */
	flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
	if (!flow)
		return NULL;

	q->hh_flows_current_cnt++;
	INIT_LIST_HEAD(&flow->flowchain);
	list_add_tail(&flow->flowchain, head);

	return flow;
}

/* Assigns packets to WDRR buckets. Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time. */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, &q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter. */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is computed as XOR sum of other chunks. */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		q->hh_flows_total_cnt++;

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2). */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *skb = bucket->head;

	bucket->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	if (bucket->head == NULL)
		bucket->head = skb;
	else
		bucket->tail->next = skb;
	bucket->tail = skb;
	skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;
	unsigned int prev_backlog;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch, to_free) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}

static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = hhf_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		kvfree(q->hhf_arrays[i]);
		kvfree(q->hhf_valid_bits[i]);
	}

	if (!q->hh_flows)
		return;

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	kvfree(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	unsigned int qlen, prev_backlog;
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

	q->quantum = new_quantum;
	q->hhf_non_hh_weight = new_hhf_non_hh_weight;

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		q->hhf_reset_timeout = usecs_to_jiffies(us);
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		q->hhf_evict_timeout = usecs_to_jiffies(us);
	}

	qlen = sch->q.qlen;
	prev_backlog = sch->qstats.backlog;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = hhf_dequeue(sch);

		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
				  prev_backlog - sch->qstats.backlog);

	sch_tree_unlock(sch);
	return 0;
}

static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25; /* 40 ms */
	q->hhf_admit_bytes = 131072;	/* 128 KB */
	q->hhf_evict_timeout = HZ;	/* 1 sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
				       GFP_KERNEL);
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap max active HHs at twice len of hh_flows table. */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
						    sizeof(u32),
						    GFP_KERNEL);
			if (!q->hhf_arrays[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
							BITS_PER_BYTE, GFP_KERNEL);
			if (!q->hhf_valid_bits[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(q->hhf_reset_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(q->hhf_evict_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st = {
		.drop_overlimit = q->drop_overlimit,
		.hh_overlimit	= q->hh_flows_overlimit,
		.hh_tot_count	= q->hh_flows_total_cnt,
		.hh_cur_count	= q->hh_flows_current_cnt,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		= "hhf",
	.priv_size	= sizeof(struct hhf_sched_data),

	.enqueue	= hhf_enqueue,
	.dequeue	= hhf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= hhf_init,
	.reset		= hhf_reset,
	.destroy	= hhf_destroy,
	.change		= hhf_change,
	.dump		= hhf_dump,
	.dump_stats	= hhf_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");