// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		     Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines on top of it. It does not need to do
	 bandwidth control either, since that can be handled by using a
	 token bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated losses according to two models:
	 the 4-state Markov chain of the GI (General and Intuitive) loss
	 model, and the 2-state "Gilbert-Elliot" model with its Gilbert
	 and Simple Gilbert special cases.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
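
/* Editorial illustration (added; not part of the original sources): from
 * user space this qdisc is normally configured through tc(8), e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% 25% duplicate 1% reorder 25% 50%
 *
 * which adds 100ms +/- 10ms of delay (25% correlated), 0.3% correlated
 * loss, 1% duplication and sends 25% of packets without delay (50%
 * correlated), causing reordering. The exact option syntax is documented
 * in tc-netem(8).
 */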

struct disttable {
	u32 size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
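
/* Editorial note (added; not from the original authors): rho is the
 * user-supplied correlation mapped onto [0, 2^32), so the update in
 * get_crandom() above is a fixed-point blend
 *
 *	answer = ((2^32 - rho) * value + rho * last) / 2^32
 *
 * e.g. with rho ~= 0x80000000 (50%) each new sample is roughly the average
 * of a fresh random value and the previous output, with rho = 0 it is pure
 * random, and with rho ~= 0xffffffff it nearly repeats the last value.
 */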

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
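
/* Editorial example (added; not part of the original source): the a1..a5
 * transition probabilities are stored as fixed-point fractions of the full
 * 32-bit range, so a configured p14 of 1% arrives here as roughly
 * 0.01 * 2^32 ~= 42949673, and "rnd < clg->a4" above is true for about 1%
 * of the uniformly drawn 32-bit values, i.e. about 1% of packets leave
 * TX_IN_GAP_PERIOD as an isolated loss.
 */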

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
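
/* Editorial note (added for illustration): with the parameter mapping used
 * above (a1 = p, a2 = r, a3 = h, a4 = 1-k), leaving a3 and a4 at zero
 * reduces this to the Simple Gilbert case: packets are never lost in
 * GOOD_STATE and are effectively always lost in BAD_STATE, so only the
 * p/r transition probabilities shape the loss bursts.
 */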

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator;
		 * a true return value means the packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * a true return value means the packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
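
/* Editorial worked example (added; not from the original source): the table
 * entries are signed samples of the target distribution in units of
 * NETEM_DIST_SCALE (8192), so the return value is approximately
 * mu + t * sigma / NETEM_DIST_SCALE, computed in two parts to avoid
 * overflow and rounded by the +/- NETEM_DIST_SCALE/2 step. For instance,
 * with mu = 100 ms and sigma = 10 ms (both in ns), a table entry t = 8192
 * yields about 110 ms, and t = -4096 yields about 95 ms.
 */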

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
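
/* Editorial example (added for illustration, assuming q->rate is in bytes
 * per second as set up from the tc-netem(8) rate option): with cell_size 48
 * and cell_overhead 5 (ATM-like framing), a 1000 byte packet occupies 21
 * cells and is billed as 21 * 53 = 1113 bytes on the wire; at a rate of
 * 125000 bytes/s (1 Mbit/s) this helper returns roughly 8.9 ms of
 * serialization delay.
 */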

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
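
/* Editorial note (added): when jitter is zero, time_to_send values arrive
 * in non-decreasing order, so every packet takes the t_tail append fast
 * path above and the rbtree stays empty; the tree is only needed for
 * packets whose send time sorts before the current tail.
 */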

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
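		/*
		 * Editorial example (added; not from the original source):
		 * with "reorder 25% gap 5" in tc-netem(8), q->gap is 5, so
		 * four packets in a row take the delayed path above and each
		 * following packet is a candidate to be sent immediately,
		 * ahead of already-delayed packets, with probability
		 * q->reorder until one of them actually is.
		 */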
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round by computing a new future slot, with the configured
 * limits on the number of bytes and packets per slot.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
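
/* Editorial example (added; the exact option syntax lives in tc-netem(8),
 * not in this file): "slot 800us 1ms packets 32 bytes 64k" makes netem
 * accumulate packets and release them in bursts, waiting between 800us and
 * 1ms before opening the next slot, and closing a slot once 32 packets or
 * 64 KB have been delivered from it.
 */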

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();
		/* is there still time remaining before this packet may be sent? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
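
/* Editorial note (added; describes the usual userspace setup rather than
 * anything defined in this file): the table used with
 * "delay ... distribution normal|pareto|paretonormal|experimental" is read
 * by tc(8) from its .dist files (normal.dist, pareto.dist, ...) and passed
 * down as this netlink attribute; each entry is a signed 16-bit sample in
 * units of NETEM_DIST_SCALE, matching the scaling used by tabledist().
 */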

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
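/* Apply TCA_NETEM_CORR: seed the correlated random state used for the
 * delay, loss and duplication decisions.
 */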
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) const struct tc_netem_corr *c = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) init_crandom(&q->delay_cor, c->delay_corr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) init_crandom(&q->loss_cor, c->loss_corr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) init_crandom(&q->dup_cor, c->dup_corr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
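/* Apply TCA_NETEM_REORDER: reordering probability and its correlation. */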
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) const struct tc_netem_reorder *r = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) q->reorder = r->probability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) init_crandom(&q->reorder_cor, r->correlation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
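/* Apply TCA_NETEM_CORRUPT: corruption probability and its correlation. */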
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) const struct tc_netem_corrupt *r = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) q->corrupt = r->probability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) init_crandom(&q->corrupt_cor, r->correlation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
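/* Apply TCA_NETEM_RATE: rate, per-packet overhead and cell accounting;
 * precompute the reciprocal of cell_size so later divisions are cheap.
 */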
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) const struct tc_netem_rate *r = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) q->rate = r->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) q->packet_overhead = r->packet_overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) q->cell_size = r->cell_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) q->cell_overhead = r->cell_overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (q->cell_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) q->cell_size_reciprocal = reciprocal_value(q->cell_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
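/* Apply the nested TCA_NETEM_LOSS attribute and pick a loss model:
 * NETEM_LOSS_GI selects the 4-state Markov model (CLG_4_STATES) and
 * NETEM_LOSS_GE the Gilbert-Elliott model (CLG_GILB_ELL).  Userspace
 * typically configures these via "tc ... netem loss state ..." or
 * "tc ... netem loss gemodel ...".  Undersized or unknown attributes
 * are rejected with -EINVAL.
 */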
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) const struct nlattr *la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) nla_for_each_nested(la, attr, rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) u16 type = nla_type(la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) case NETEM_LOSS_GI: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) const struct tc_netem_gimodel *gi = nla_data(la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pr_info("netem: incorrect gi model size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) q->loss_model = CLG_4_STATES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) q->clg.state = TX_IN_GAP_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) q->clg.a1 = gi->p13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) q->clg.a2 = gi->p31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) q->clg.a3 = gi->p32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) q->clg.a4 = gi->p14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) q->clg.a5 = gi->p23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case NETEM_LOSS_GE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) const struct tc_netem_gemodel *ge = nla_data(la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) pr_info("netem: incorrect ge model size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) q->loss_model = CLG_GILB_ELL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) q->clg.state = GOOD_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) q->clg.a1 = ge->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) q->clg.a2 = ge->r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) q->clg.a3 = ge->h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) q->clg.a4 = ge->k1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) pr_info("netem: unknown loss type %u\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
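/* Netlink policy for the optional TCA_NETEM_* attributes that may
 * follow struct tc_netem_qopt inside TCA_OPTIONS.
 */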
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) [TCA_NETEM_ECN] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) [TCA_NETEM_RATE64] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) [TCA_NETEM_LATENCY64] = { .type = NLA_S64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) [TCA_NETEM_JITTER64] = { .type = NLA_S64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
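/* netem packs a fixed struct tc_netem_qopt at the start of TCA_OPTIONS,
 * optionally followed by nested netlink attributes.  Parse whatever
 * follows the fixed header; if nothing does, just clear the table.
 */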
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) const struct nla_policy *policy, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int nested_len = nla_len(nla) - NLA_ALIGN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (nested_len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pr_info("netem: invalid attributes len %d\n", nested_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (nested_len >= nla_attr_size(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return nla_parse_deprecated(tb, maxtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) nla_data(nla) + NLA_ALIGN(len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) nested_len, policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* Parse netlink message to set options */
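/* Example (iproute2 syntax, for illustration only):
 *
 *   tc qdisc add dev eth0 root netem delay 100ms 20ms 25%
 *   tc qdisc change dev eth0 root netem loss 0.3% 25% duplicate 1%
 *   tc qdisc change dev eth0 root netem corrupt 0.1% reorder 25% 50%
 *   tc qdisc change dev eth0 root netem rate 1mbit
 *
 * Each keyword maps onto one of the TCA_NETEM_* attributes handled
 * below.
 */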
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static int netem_change(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct nlattr *tb[TCA_NETEM_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct tc_netem_qopt *qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct clgstate old_clg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int old_loss_model = CLG_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (opt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) qopt = nla_data(opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* backup q->clg and q->loss_model */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) old_clg = q->clg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) old_loss_model = q->loss_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (tb[TCA_NETEM_LOSS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) q->loss_model = old_loss_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) q->loss_model = CLG_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (tb[TCA_NETEM_DELAY_DIST]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ret = get_dist_table(sch, &q->delay_dist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) tb[TCA_NETEM_DELAY_DIST]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) goto get_table_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (tb[TCA_NETEM_SLOT_DIST]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ret = get_dist_table(sch, &q->slot_dist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) tb[TCA_NETEM_SLOT_DIST]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) goto get_table_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) sch->limit = qopt->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) q->latency = PSCHED_TICKS2NS(qopt->latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) q->jitter = PSCHED_TICKS2NS(qopt->jitter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) q->limit = qopt->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) q->gap = qopt->gap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) q->counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) q->loss = qopt->loss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) q->duplicate = qopt->duplicate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* For compatibility with earlier versions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * if gap is set, assume 100% reordering probability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (q->gap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) q->reorder = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (tb[TCA_NETEM_CORR])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) get_correlation(q, tb[TCA_NETEM_CORR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (tb[TCA_NETEM_REORDER])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) get_reorder(q, tb[TCA_NETEM_REORDER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (tb[TCA_NETEM_CORRUPT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (tb[TCA_NETEM_RATE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) get_rate(q, tb[TCA_NETEM_RATE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (tb[TCA_NETEM_RATE64])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) q->rate = max_t(u64, q->rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) nla_get_u64(tb[TCA_NETEM_RATE64]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (tb[TCA_NETEM_LATENCY64])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (tb[TCA_NETEM_JITTER64])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (tb[TCA_NETEM_ECN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (tb[TCA_NETEM_SLOT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) get_slot(q, tb[TCA_NETEM_SLOT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* capping jitter to the range accepted by tabledist() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) get_table_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* recover clg and loss_model, in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * q->clg and q->loss_model were modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * by get_loss_clg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) q->clg = old_clg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) q->loss_model = old_loss_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
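/* Qdisc init: set up the watchdog timer, require options (netem cannot
 * be created without them), default to the legacy random loss model and
 * apply the configuration via netem_change().
 */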
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int netem_init(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) qdisc_watchdog_init(&q->watchdog, sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) q->loss_model = CLG_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ret = netem_change(sch, opt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) pr_info("netem: change failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
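/* Qdisc teardown: stop the watchdog and release the child qdisc and any
 * delay/slot distribution tables.
 */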
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static void netem_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) qdisc_watchdog_cancel(&q->watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (q->qdisc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) qdisc_put(q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dist_free(q->delay_dist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dist_free(q->slot_dist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
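/* Dump the configured loss model as a nested TCA_NETEM_LOSS attribute.
 * The legacy random model carries no extra state, so nothing is emitted
 * for it.
 */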
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static int dump_loss_model(const struct netem_sched_data *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) switch (q->loss_model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) case CLG_RANDOM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* legacy loss model */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return 0; /* no data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case CLG_4_STATES: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct tc_netem_gimodel gi = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .p13 = q->clg.a1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .p31 = q->clg.a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .p32 = q->clg.a3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .p14 = q->clg.a4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .p23 = q->clg.a5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case CLG_GILB_ELL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct tc_netem_gemodel ge = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .p = q->clg.a1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .r = q->clg.a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .h = q->clg.a3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .k1 = q->clg.a4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
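/* Fill TCA_OPTIONS for a dump: the legacy struct tc_netem_qopt first,
 * then the optional attributes.  A rate that does not fit in the legacy
 * 32-bit field is reported via TCA_NETEM_RATE64 and the legacy field is
 * saturated to ~0U.
 */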
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) const struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct tc_netem_qopt qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct tc_netem_corr cor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct tc_netem_reorder reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct tc_netem_corrupt corrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct tc_netem_rate rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct tc_netem_slot slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) qopt.limit = q->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) qopt.loss = q->loss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) qopt.gap = q->gap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) qopt.duplicate = q->duplicate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) cor.delay_corr = q->delay_cor.rho;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) cor.loss_corr = q->loss_cor.rho;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cor.dup_corr = q->dup_cor.rho;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) reorder.probability = q->reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) reorder.correlation = q->reorder_cor.rho;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) corrupt.probability = q->corrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) corrupt.correlation = q->corrupt_cor.rho;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (q->rate >= (1ULL << 32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) TCA_NETEM_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) rate.rate = ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) rate.rate = q->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rate.packet_overhead = q->packet_overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) rate.cell_size = q->cell_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) rate.cell_overhead = q->cell_overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (dump_loss_model(q, skb) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (q->slot_config.min_delay | q->slot_config.max_delay |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) q->slot_config.dist_jitter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) slot = q->slot_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (slot.max_packets == INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) slot.max_packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (slot.max_bytes == INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) slot.max_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return nla_nest_end(skb, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) nlmsg_trim(skb, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
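/* netem exposes a single class (minor 1) that wraps the child qdisc. */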
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct sk_buff *skb, struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (cl != 1 || !q->qdisc) /* only one class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) tcm->tcm_handle |= TC_H_MIN(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tcm->tcm_info = q->qdisc->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
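/* Replace the child qdisc; the previous child is handed back via *old. */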
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct Qdisc **old, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) *old = qdisc_replace(sch, new, &q->qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct netem_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return q->qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static unsigned long netem_find(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
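/* Walk the single pseudo-class, honouring the walker's skip/count/stop
 * protocol.
 */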
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!walker->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (walker->count >= walker->skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (walker->fn(sch, 1, walker) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) walker->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) walker->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
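/* Minimal class interface: exactly one fixed class holding the child qdisc. */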
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static const struct Qdisc_class_ops netem_class_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .graft = netem_graft,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .leaf = netem_leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .find = netem_find,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .walk = netem_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) .dump = netem_dump_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .id = "netem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .cl_ops = &netem_class_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .priv_size = sizeof(struct netem_sched_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .enqueue = netem_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .dequeue = netem_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) .peek = qdisc_peek_dequeued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) .init = netem_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .reset = netem_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .destroy = netem_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .change = netem_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .dump = netem_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static int __init netem_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pr_info("netem: version " VERSION "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return register_qdisc(&netem_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void __exit netem_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) unregister_qdisc(&netem_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) module_init(netem_module_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) module_exit(netem_module_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) MODULE_LICENSE("GPL");