Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (c) 2008, Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Author: Alexander Duyck <alexander.h.duyck@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <net/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) struct multiq_sched_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 	u16 bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	u16 max_bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	u16 curband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	struct tcf_proto __rcu *filter_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	struct tcf_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	struct Qdisc **queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) static struct Qdisc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	u32 band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	struct tcf_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	err = tcf_classify(skb, fl, &res, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #ifdef CONFIG_NET_CLS_ACT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	case TC_ACT_STOLEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	case TC_ACT_QUEUED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	case TC_ACT_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	case TC_ACT_SHOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	band = skb_get_queue_mapping(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	if (band >= q->bands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		return q->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	return q->queues[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	       struct sk_buff **to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	struct Qdisc *qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	qdisc = multiq_classify(skb, sch, &ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) #ifdef CONFIG_NET_CLS_ACT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	if (qdisc == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		if (ret & __NET_XMIT_BYPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 			qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		__qdisc_drop(skb, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	ret = qdisc_enqueue(skb, qdisc, to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	if (ret == NET_XMIT_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		sch->q.qlen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		return NET_XMIT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	if (net_xmit_drop_count(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		qdisc_qstats_drop(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	struct Qdisc *qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	for (band = 0; band < q->bands; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		/* cycle through bands to ensure fairness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		q->curband++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		if (q->curband >= q->bands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 			q->curband = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		/* Check that target subqueue is available before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 		 * pulling an skb to avoid head-of-line blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		if (!netif_xmit_stopped(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 			qdisc = q->queues[q->curband];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 			skb = qdisc->dequeue(qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 			if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 				qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 				sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 				return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static struct sk_buff *multiq_peek(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	unsigned int curband = q->curband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	struct Qdisc *qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	for (band = 0; band < q->bands; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 		/* cycle through bands to ensure fairness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		curband++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		if (curband >= q->bands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 			curband = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		/* Check that target subqueue is available before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		 * pulling an skb to avoid head-of-line blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		if (!netif_xmit_stopped(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 			qdisc = q->queues[curband];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 			skb = qdisc->ops->peek(qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 			if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 				return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) multiq_reset(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	u16 band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	for (band = 0; band < q->bands; band++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		qdisc_reset(q->queues[band]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	sch->q.qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	q->curband = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) multiq_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	tcf_block_put(q->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	for (band = 0; band < q->bands; band++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		qdisc_put(q->queues[band]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	kfree(q->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		       struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	struct tc_multiq_qopt *qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	struct Qdisc **removed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	int i, n_removed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	if (!netif_is_multiqueue(qdisc_dev(sch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	if (nla_len(opt) < sizeof(*qopt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	qopt = nla_data(opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 			  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	if (!removed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	q->bands = qopt->bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	for (i = q->bands; i < q->max_bands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		if (q->queues[i] != &noop_qdisc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 			struct Qdisc *child = q->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 			q->queues[i] = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 			qdisc_purge_queue(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			removed[n_removed++] = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	for (i = 0; i < n_removed; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		qdisc_put(removed[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	kfree(removed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	for (i = 0; i < q->bands; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		if (q->queues[i] == &noop_qdisc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 			struct Qdisc *child, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 			child = qdisc_create_dflt(sch->dev_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 						  &pfifo_qdisc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 						  TC_H_MAKE(sch->handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 							    i + 1), extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			if (child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 				sch_tree_lock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 				old = q->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 				q->queues[i] = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 				if (child != &noop_qdisc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 					qdisc_hash_add(child, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 				if (old != &noop_qdisc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 					qdisc_purge_queue(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 				sch_tree_unlock(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 				qdisc_put(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		       struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	q->queues = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	q->max_bands = qdisc_dev(sch)->num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	if (!q->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	for (i = 0; i < q->max_bands; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		q->queues[i] = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	return multiq_tune(sch, opt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	struct tc_multiq_qopt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	opt.bands = q->bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	opt.max_bands = q->max_bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 			struct Qdisc **old, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	unsigned long band = arg - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		new = &noop_qdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	*old = qdisc_replace(sch, new, &q->queues[band]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static struct Qdisc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) multiq_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	unsigned long band = arg - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	return q->queues[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	unsigned long band = TC_H_MIN(classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	if (band - 1 >= q->bands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	return band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 				 u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	return multiq_find(sch, classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) static void multiq_unbind(struct Qdisc *q, unsigned long cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 			     struct sk_buff *skb, struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	tcm->tcm_handle |= TC_H_MIN(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	tcm->tcm_info = q->queues[cl - 1]->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 				 struct gnet_dump *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	struct Qdisc *cl_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	cl_q = q->queues[cl - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 				  d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	    qdisc_qstats_copy(d, cl_q) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	int band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	if (arg->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	for (band = 0; band < q->bands; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		if (arg->count < arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 			arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		if (arg->fn(sch, band + 1, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 			arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 		arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 					  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	struct multiq_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	if (cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	return q->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static const struct Qdisc_class_ops multiq_class_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	.graft		=	multiq_graft,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	.leaf		=	multiq_leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	.find		=	multiq_find,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	.walk		=	multiq_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	.tcf_block	=	multiq_tcf_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	.bind_tcf	=	multiq_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	.unbind_tcf	=	multiq_unbind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	.dump		=	multiq_dump_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	.dump_stats	=	multiq_dump_class_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 	.next		=	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	.cl_ops		=	&multiq_class_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	.id		=	"multiq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	.priv_size	=	sizeof(struct multiq_sched_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	.enqueue	=	multiq_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	.dequeue	=	multiq_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	.peek		=	multiq_peek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	.init		=	multiq_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	.reset		=	multiq_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	.destroy	=	multiq_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	.change		=	multiq_tune,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	.dump		=	multiq_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	.owner		=	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) static int __init multiq_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	return register_qdisc(&multiq_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static void __exit multiq_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	unregister_qdisc(&multiq_qdisc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) module_init(multiq_module_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) module_exit(multiq_module_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) MODULE_LICENSE("GPL");