// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

*/
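
/*
 * Illustrative sketch (not part of the build): the per-packet decision CHOKe
 * adds on top of RED, in simplified pseudo-C. The names are shorthand for
 * the real helpers used below (choke_match_random(), the red_*() family),
 * and the sketch assumes a single random sample per enqueue, as this file
 * implements.
 *
 *	enqueue(new):
 *		qavg = red_calc_qavg(...);
 *		if (qavg > qth_min) {
 *			old = pick_random_queued_packet();
 *			if (same_flow(old, new))
 *				drop(old), drop(new);	// the CHOKe step
 *			else
 *				apply_RED_mark_or_drop(new);
 *		}
 *		// otherwise admit the new packet, subject to the queue limit
 */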

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
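
/*
 * Example (illustrative): with tab_mask = 7 (an 8-slot table), head = 6 and
 * tail = 2, the length is (2 - 6) & 7 = 4 -- the unsigned subtraction wraps
 * and the mask recovers the circular distance (slots 6, 7, 0, 1). This only
 * works because the table size is kept a power of two.
 */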

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
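
/*
 * Example (illustrative): suppose an 8-slot table holds A at index 0, holes
 * at indices 1 and 2, and B at index 3, with head = 0 and tail = 4. Dropping
 * A leaves a hole at the head, so choke_zap_head_holes() advances head past
 * indices 1 and 2 straight to B at index 3. The tail variant walks backwards
 * in the same way, so both ends of the ring always point at real packets
 * (or meet when the queue is empty).
 */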

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}
/*
 * Compare the flows of two packets.
 * Returns true only if source and destination addresses and ports match;
 * returns false for special cases (e.g. different protocols). The dissected
 * flow keys are cached in the skb cb, so each packet is dissected at most
 * once.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
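
/*
 * Example (illustrative): two TCP segments with the same 5-tuple
 * (saddr, daddr, sport, dport, protocol) compare equal here even if their
 * payloads and lengths differ. A segment from the reverse direction of the
 * same connection does not match, since source and destination are swapped
 * in its flow keys digest and the digests are compared byte for byte.
 */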

/*
 * Select a packet at random from the queue.
 * HACK: since the queue can contain holes from previous deletions, retry
 * several times to find a random skb; if that fails, give up and return
 * the packet at the head.
 * Returns NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retries = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retries > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	stab = nla_data(tb[TCA_CHOKE_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
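	/*
	 * Example (illustrative): limit = 1000 gives
	 * roundup_pow_of_two(1001) - 1 = 1024 - 1 = 1023, i.e. a 1024-slot
	 * table. Keeping the size a power of two lets the index arithmetic
	 * above use masking instead of modulo.
	 */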
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		= "choke",
	.priv_size	= sizeof(struct choke_sched_data),

	.enqueue	= choke_enqueue,
	.dequeue	= choke_dequeue,
	.peek		= choke_peek_head,
	.init		= choke_init,
	.destroy	= choke_destroy,
	.reset		= choke_reset,
	.change		= choke_change,
	.dump		= choke_dump,
	.dump_stats	= choke_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
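
/*
 * Example usage (illustrative; exact option names and defaults depend on the
 * installed iproute2 version):
 *
 *	tc qdisc add dev eth0 root choke limit 1000 min 100 max 300 \
 *		avpkt 1000 burst 150 bandwidth 10mbit ecn
 *
 * The parameters mirror RED: min/max are the qth_min/qth_max thresholds on
 * the average queue length, limit caps the instantaneous queue length in
 * packets (bounded above by CHOKE_MAX_QUEUE), and ecn selects marking over
 * dropping where the flag is honoured in choke_enqueue() above.
 */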