// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on a suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
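/* Illustrative userspace configuration (rough sketch from memory, not
 * exhaustive; see the tc(8) documentation for the authoritative syntax):
 *
 *   tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *   tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *      burst 20 avpkt 1000 bandwidth 10Mbit DP 0 probability 0.02 prio 2
 *
 * The first command lays out the table of virtual queues (DPs), the second
 * sets the RED parameters of one virtual queue.
 */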

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS (TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length */
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

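/* Helpers for the per-Qdisc operating mode bits.  In RIO (grio) mode each
 * virtual queue has a priority and the averages of higher-priority queues
 * are added in when making the drop decision; in WRED mode a single average
 * queue length (wred_set) is shared across all virtual queues.
 */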
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

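/* Return 1 if any two configured virtual queues share the same priority,
 * in which case the callers switch the Qdisc into WRED mode.
 */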
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

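/* In WRED mode the average is computed over the whole Qdisc backlog,
 * otherwise each virtual queue tracks its own backlog.  The virtual queue
 * (DP) of a packet is carried in the low bits of skb->tc_index.
 */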
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

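/* Load/store the shared WRED averaging state (average queue length and
 * idle-period start) between the table and the virtual queue being serviced.
 */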
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless the global flags are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

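/* Enqueue path: map the packet to its virtual queue (falling back to the
 * default DP), update the average queue length, and let RED decide whether
 * to accept, ECN-mark or drop the packet before tail-enqueueing it.
 */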
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

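/* Dequeue from the head of the shared queue and charge the packet back to
 * its virtual queue; start the RED idle period once the relevant backlog
 * (per-VQ, or Qdisc-wide in WRED mode) drains to zero.
 */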
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

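/* Drop all queued packets and restart the RED state of every virtual queue. */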
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

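/* Push the current GRED configuration down to the driver via ndo_setup_tc.
 * For TC_GRED_REPLACE a snapshot of every virtual queue is included.
 * Silently does nothing if the device cannot offload the Qdisc.
 */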
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command = command,
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

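/* Fetch hardware counters for every offloaded virtual queue and fold them
 * into the software statistics before dumping.
 */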
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns a failure, adjust the stats - in case
	 * offload ended but the driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

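/* Parse TCA_GRED_DPS: (re)size the table of virtual queues, pick the default
 * DP, update the global RED flags and the grio/WRED operating mode, and
 * destroy any virtual queues that fall outside the new table size.
 */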
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

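/* Create or update a single virtual queue from a tc_gred_qopt, allocating
 * it from *prealloc if it does not exist yet.  The per-VQ limit is clamped
 * to the Qdisc-wide limit.
 */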
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP] = { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS] = { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB] = { .len = 256 },
	[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P] = { .type = NLA_U32 },
	[TCA_GRED_LIMIT] = { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST] = { .type = NLA_NESTED },
};

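/* Apply the per-virtual-queue attributes from an already validated
 * TCA_GRED_VQ_LIST, currently just the per-VQ RED flags.
 */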
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

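/* Validate a TCA_GRED_VQ_LIST before anything is applied: every entry must
 * name an existing DP (or the one being created, cdp) and may only carry
 * RED flags that are allowed per virtual queue and do not conflict with the
 * per-Qdisc flags.
 */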
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

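/* Handle a netlink "change" request.  A request with neither PARMS nor STAB
 * only updates the Qdisc limit and the table layout; otherwise it configures
 * one virtual queue (plus optional per-VQ attributes) and re-evaluates the
 * WRED operating mode before refreshing the offload state.
 */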
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

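/* Initialize a new GRED Qdisc: only the table layout (TCA_GRED_DPS) and the
 * optional byte limit may be given at this point; virtual queues are
 * configured by later change requests.
 */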
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

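/* Dump configuration and statistics: hardware stats are folded in first,
 * then the legacy all-in-one TCA_GRED_PARMS array is emitted, followed by
 * the structured per-virtual-queue TCA_GRED_VQ_LIST.
 */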
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct gred_sched *table = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct nlattr *parms, *vqs, *opts = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) u32 max_p[MAX_DPs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct tc_gred_sopt sopt = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .DPs = table->DPs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) .def_DP = table->def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) .grio = gred_rio_mode(table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) .flags = table->red_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (gred_offload_dump_stats(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (opts == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) for (i = 0; i < MAX_DPs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct gred_sched_data *q = table->tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) max_p[i] = q ? q->parms.max_P : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Old style all-in-one dump of VQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (parms == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) for (i = 0; i < MAX_DPs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct gred_sched_data *q = table->tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct tc_gred_qopt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) unsigned long qavg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) memset(&opt, 0, sizeof(opt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* hack -- fix at some point with proper message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) This is how we indicate to tc that there is no VQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) at this DP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) opt.DP = MAX_DPs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto append_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) opt.limit = q->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) opt.DP = q->DP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) opt.backlog = gred_backlog(table, q, sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) opt.prio = q->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) opt.Wlog = q->parms.Wlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) opt.Plog = q->parms.Plog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) opt.Scell_log = q->parms.Scell_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) opt.other = q->stats.other;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) opt.early = q->stats.prob_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) opt.forced = q->stats.forced_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) opt.pdrop = q->stats.pdrop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) opt.packets = q->packetsin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) opt.bytesin = q->bytesin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (gred_wred_mode(table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) gred_load_wred_set(table, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) qavg = red_calc_qavg(&q->parms, &q->vars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) q->vars.qavg >> q->parms.Wlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) opt.qave = qavg >> q->parms.Wlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) append_opt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (nla_append(skb, sizeof(opt), &opt) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) nla_nest_end(skb, parms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)	/* Dump the VQs again, in a more structured way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!vqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) for (i = 0; i < MAX_DPs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct gred_sched_data *q = table->tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct nlattr *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
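		/* Per-VQ RED flags (ECN, harddrop) */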
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) TCA_GRED_VQ_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) gred_backlog(table, q, sch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) q->stats.prob_drop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) q->stats.prob_mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) q->stats.forced_drop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) q->stats.forced_mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) nla_nest_end(skb, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) nla_nest_end(skb, vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
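	/* Close the outer nest; nla_nest_end() returns skb->len on success */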
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return nla_nest_end(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
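/* Any failure above lands here: cancel the partially built nest and report
 * that the attributes did not fit into the message.
 */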
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) nla_nest_cancel(skb, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static void gred_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct gred_sched *table = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
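	/* Free every configured virtual queue, then notify offloading
	 * drivers that the qdisc is being destroyed.
	 */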
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) for (i = 0; i < table->DPs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (table->tab[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) gred_destroy_vq(table->tab[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) gred_offload(sch, TC_GRED_DESTROY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) .id = "gred",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .priv_size = sizeof(struct gred_sched),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .enqueue = gred_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .dequeue = gred_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .peek = qdisc_peek_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .init = gred_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .reset = gred_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .destroy = gred_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .change = gred_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .dump = gred_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
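/* Standard qdisc module registration.  Once loaded, the qdisc can be set up
 * with iproute2, for example (illustrative only -- the exact syntax depends
 * on the tc version in use):
 *
 *	tc qdisc add dev eth0 root gred setup vqs 4 default 1
 *
 * followed by a "tc qdisc change ... gred DP <n> ..." invocation per virtual
 * queue to configure its RED parameters.
 */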
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int __init gred_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return register_qdisc(&gred_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void __exit gred_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) unregister_qdisc(&gred_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) module_init(gred_module_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) module_exit(gred_module_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) MODULE_LICENSE("GPL");