/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * This is true for MQ/MQPRIO slaves and
				      * for non-multiqueue devices.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put the most frequently modified
	 * fields at the end.
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	/* for NOLOCK qdisc, true if there are no enqueued skbs */
	bool			empty;
	struct rcu_head		rcu;

	ANDROID_KABI_RESERVE(1);

	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
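
/* Usage sketch (illustrative only, not part of the API): an unlocked reader
 * would typically take the reference under RCU and drop it once done, with
 * "txq" standing in for whatever queue the caller looked up:
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
 *	rcu_read_unlock();
 *	if (q) {
 *		... use q ...
 *		qdisc_put(q);
 *	}
 *
 * (qdisc_put_unlocked() is the variant to use when RTNL is not held.)
 */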

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return READ_ONCE(qdisc->empty);
	return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			goto nolock_empty;

		/* Paired with smp_mb__after_atomic() to make sure
		 * STATE_MISSED checking is synchronized with clearing
		 * in pfifo_fast_dequeue().
		 */
		smp_mb__before_atomic();

		/* If the MISSED flag is set, it means another thread has
		 * set the MISSED flag before the second spin_trylock(), so
		 * we can return false here to avoid multiple CPUs doing
		 * the set_bit() and the second spin_trylock() concurrently.
		 */
		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Set the MISSED flag before the second spin_trylock();
		 * if the second spin_trylock() returns false, it means
		 * the other CPU holding the lock will do the dequeuing
		 * for us, or it will see the MISSED flag set after
		 * releasing the lock and reschedule net_tx_action() to
		 * do the dequeuing.
		 */
		set_bit(__QDISC_STATE_MISSED, &qdisc->state);

		/* spin_trylock() only has load-acquire semantics, so use
		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
		 * before doing the second spin_trylock().
		 */
		smp_mb__after_atomic();

		/* Retry in case the other CPU did not see the new flag
		 * before it released the lock at the end of qdisc_run_end().
		 */
		if (!spin_trylock(&qdisc->seqlock))
			return false;

nolock_empty:
		WRITE_ONCE(qdisc->empty, false);
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state))) {
			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
			__netif_schedule(qdisc);
		}
	}
}
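
/* Usage sketch (illustrative): the transmit path brackets its dequeue work
 * with this pair, so that at most one CPU runs a given qdisc at a time:
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets from q ...
 *		qdisc_run_end(q);
 *	}
 */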

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Drivers not migrated to BQL will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);

	ANDROID_KABI_RESERVE(1);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;

	ANDROID_KABI_RESERVE(1);
};


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool, bool,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
	u16			mru;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}
#else
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return true;
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
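
/* Usage sketch (illustrative): walking a chain's filter list with the
 * filter_chain_lock held, using the lockdep-checked helper above ("chain"
 * stands for the struct tcf_chain being walked):
 *
 *	struct tcf_proto *tp;
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	for (tp = tcf_chain_dereference(chain->filter_chain, chain); tp;
 *	     tp = tcf_chain_dereference(tp->next, chain))
 *		...;
 *	mutex_unlock(&chain->filter_chain_lock);
 */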

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
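
/* Usage sketch (illustrative, "foo" names are made up): a qdisc that needs
 * per-skb scratch space can overlay its own structure on the private area:
 *
 *	struct foo_skb_cb {
 *		u32 enqueue_time;
 *	};
 *
 *	static struct foo_skb_cb *foo_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct foo_skb_cb));
 *		return (struct foo_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */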

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
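
/* Usage sketch (illustrative): a qdisc's ->change() handler, running under
 * RTNL, typically brackets its configuration update like this ("new_limit"
 * is a made-up parameter name):
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;
 *	sch_tree_unlock(sch);
 */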

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
		default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
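
/* Usage sketch (illustrative; "foo", "q" and "clhash" are made-up names for
 * a qdisc's private per-class state): classful qdiscs embed
 * struct Qdisc_class_common in their per-class structure and resolve a
 * classid roughly like this:
 *
 *	struct foo_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	cl = clc ? container_of(clc, struct foo_class, common) : NULL;
 */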

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at queue index i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) const struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) #ifdef CONFIG_NET_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (stab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) __qdisc_calculate_pkt_len(skb, stab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) qdisc_calculate_pkt_len(skb, sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return sch->enqueue(skb, sch, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
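
/* Usage sketch (illustrative only): a classful qdisc enqueuing into a
 * child typically propagates the child's verdict with the helpers above,
 * counting a drop only when net_xmit_drop_count() says the failure was a
 * real drop; "child" is the parent's private child qdisc pointer and the
 * length is sampled before the child may consume the skb:
 *
 *	unsigned int len = qdisc_pkt_len(skb);
 *	int ret = qdisc_enqueue(skb, child, to_free);
 *
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	sch->qstats.backlog += len;
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */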
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) __u64 bytes, __u32 packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bstats->bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) bstats->packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) _bstats_update(bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) qdisc_pkt_len(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) __u64 bytes, __u32 packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u64_stats_update_begin(&bstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) _bstats_update(&bstats->bstats, bytes, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) u64_stats_update_end(&bstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u64_stats_update_begin(&bstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) bstats_update(&bstats->bstats, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u64_stats_update_end(&bstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static inline void qdisc_bstats_update(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) bstats_update(&sch->bstats, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sch->qstats.backlog -= qdisc_pkt_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) sch->qstats.backlog += qdisc_pkt_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) this_cpu_inc(sch->cpu_qstats->qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) this_cpu_dec(sch->cpu_qstats->qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) this_cpu_inc(sch->cpu_qstats->requeues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) sch->qstats.drops += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) qstats->drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) qstats->overlimits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static inline void qdisc_qstats_drop(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) qstats_drop_inc(&sch->qstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) this_cpu_inc(sch->cpu_qstats->drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) sch->qstats.overlimits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) __u32 qlen = qdisc_qlen_sum(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) __u32 *backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct gnet_stats_queue qstats = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) __u32 len = qdisc_qlen_sum(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *qlen = qstats.qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *backlog = qstats.backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) __u32 qlen, backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) qdisc_tree_reduce_backlog(sch, qlen, backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static inline void qdisc_purge_queue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) __u32 qlen, backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) qdisc_reset(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) qdisc_tree_reduce_backlog(sch, qlen, backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
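
/* Note (illustrative): the two helpers above differ only in whether the
 * child queue itself is reset.  qdisc_tree_flush_backlog() is typically
 * used when the child is about to be destroyed anyway and only the parent
 * counters need correcting, while qdisc_purge_queue() both empties the
 * child and propagates the qlen/backlog decrease up the tree, as
 * qdisc_replace() below does when swapping in a new child.
 */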
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) qh->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) qh->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) qh->qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct qdisc_skb_head *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct sk_buff *last = qh->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) skb->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) last->next = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) qh->tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) qh->tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) qh->head = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) qh->qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __qdisc_enqueue_tail(skb, &sch->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) qdisc_qstats_backlog_inc(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return NET_XMIT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline void __qdisc_enqueue_head(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct qdisc_skb_head *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) skb->next = qh->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!qh->head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) qh->tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) qh->head = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) qh->qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct sk_buff *skb = qh->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (likely(skb != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) qh->head = skb->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) qh->qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (qh->head == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) qh->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) skb->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (likely(skb != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
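
/* Minimal sketch of how these helpers compose into a pfifo-style datapath
 * (illustrative; the real implementation lives in net/sched/sch_fifo.c):
 *
 *	static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *					struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 * with qdisc_dequeue_head() used directly as the ->dequeue() hook, since it
 * already updates backlog and byte/packet counters on the way out.
 */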
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* Instead of calling kfree_skb() while the root qdisc lock is held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  * queue the skb for future freeing at the end of __dev_xmit_skb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) skb->next = *to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *to_free = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static inline void __qdisc_drop_all(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (skb->prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) skb->prev->next = *to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) skb->next = *to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) *to_free = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct qdisc_skb_head *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct sk_buff *skb = __qdisc_dequeue_head(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (likely(skb != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) unsigned int len = qdisc_pkt_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) __qdisc_drop(skb, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) const struct qdisc_skb_head *qh = &sch->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return qh->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* generic pseudo peek method for non-work-conserving qdisc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct sk_buff *skb = skb_peek(&sch->gso_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) skb = sch->dequeue(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) __skb_queue_head(&sch->gso_skb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* it's still part of the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) qdisc_qstats_backlog_inc(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sch->q.qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (qdisc_is_percpu_stats(sch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) qdisc_qstats_cpu_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) qdisc_bstats_cpu_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) qdisc_qstats_cpu_qlen_dec(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) unsigned int pkt_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (qdisc_is_percpu_stats(sch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) qdisc_qstats_cpu_qlen_inc(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) sch->qstats.backlog += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) sch->q.qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct sk_buff *skb = skb_peek(&sch->gso_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) skb = __skb_dequeue(&sch->gso_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (qdisc_is_percpu_stats(sch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) qdisc_qstats_cpu_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) qdisc_qstats_cpu_qlen_dec(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) skb = sch->dequeue(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
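
/* Illustrative note: qdisc_peek_dequeued() and qdisc_dequeue_peeked() are
 * meant to be used as a pair.  A shaper such as TBF peeks at the next
 * packet to decide whether its rate currently allows sending it and, once
 * it does, retrieves exactly that packet with qdisc_dequeue_peeked(), so
 * the skb stashed in gso_skb is neither lost nor accounted twice.
 */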
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * We do not know the backlog in bytes of this list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * it is up to the caller to correct it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (qh->qlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) rtnl_kfree_skbs(qh->head, qh->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) qh->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) qh->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) qh->qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static inline void qdisc_reset_queue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) __qdisc_reset_queue(&sch->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) sch->qstats.backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct Qdisc **pold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct Qdisc *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) old = *pold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *pold = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (old != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) qdisc_purge_queue(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
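
/* Usage sketch (illustrative only): a classful qdisc's ->graft() operation
 * typically reduces to a single qdisc_replace() call; example_sched_data
 * and its ->child pointer below are hypothetical:
 *
 *	static int example_graft(struct Qdisc *sch, unsigned long arg,
 *				 struct Qdisc *new, struct Qdisc **old,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		if (!new)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->child);
 *		return 0;
 *	}
 */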
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) rtnl_kfree_skbs(skb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) __qdisc_drop(skb, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) qdisc_qstats_cpu_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return NET_XMIT_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) __qdisc_drop(skb, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return NET_XMIT_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) __qdisc_drop_all(skb, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return NET_XMIT_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) long it will take to send a packet given its size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (slot < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) slot >>= rtab->rate.cell_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (slot > 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return rtab->data[slot];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
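
/* Worked example (assumed numbers, for illustration only): with
 * cell_log = 3, cell_align = 0 and overhead = 0, a 1000 byte packet maps
 * to slot = 1000 >> 3 = 125, so its transmission time is rtab->data[125].
 * Packets larger than 255 cells fall back to the scaled estimate in the
 * slot > 255 branch above.
 */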
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct psched_ratecfg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) u64 rate_bytes_ps; /* bytes per second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) u32 mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) u16 overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) u16 mpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) u8 linklayer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) u8 shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) len += r->overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (len < r->mpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) len = r->mpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return ((u64)len * r->mult) >> r->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
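
/* Worked example (assumed numbers, for illustration only): for a rate of
 * 125000 bytes/s (1 Mbit/s) on a non-ATM link with overhead = 0 and
 * mpu = 0, psched_ratecfg_precompute() chooses mult/shift so that
 * (len * mult) >> shift approximates len * NSEC_PER_SEC / rate_bytes_ps;
 * a 1500 byte packet therefore costs roughly 12,000,000 ns (12 ms).
 */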
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) void psched_ratecfg_precompute(struct psched_ratecfg *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) const struct tc_ratespec *conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) u64 rate64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) const struct psched_ratecfg *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) memset(res, 0, sizeof(*res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	/* The legacy struct tc_ratespec has a 32bit @rate field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 * Qdiscs using 64bit rates should add new attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * in order to maintain compatibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) res->overhead = r->overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) res->mpu = r->mpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* The mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * The fast path only needs to access the filter list and to update stats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct mini_Qdisc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct tcf_proto *filter_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct tcf_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct gnet_stats_basic_cpu __percpu *cpu_bstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct gnet_stats_queue __percpu *cpu_qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) this_cpu_inc(miniq->cpu_qstats->drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct mini_Qdisc_pair {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct mini_Qdisc miniq1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct mini_Qdisc miniq2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct mini_Qdisc __rcu **p_miniq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct tcf_proto *tp_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct mini_Qdisc __rcu **p_miniq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct tcf_block *block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
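/* Usage sketch (illustrative; roughly how the ingress/clsact qdiscs wire
 * this up): the qdisc embeds a mini_Qdisc_pair, points p_miniq at the
 * per-device mini qdisc pointer, and swaps on every filter chain head
 * change reported by the tcf block:
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *	...
 *	mini_qdisc_pair_swap(&q->miniqp, tp_head);
 *
 * so the RX fast path always sees a consistent filter list via RCU.
 */
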
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) #endif