Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *              Init --  EINVAL when opt undefined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <net/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
/* Per-qdisc private state for the prio scheduler. */
struct prio_sched_data {
	int bands;				/* active bands; prio_tune() enforces 2..TCQ_PRIO_BANDS */
	struct tcf_proto __rcu *filter_list;	/* classifier chain, read under RCU in prio_classify() */
	struct tcf_block *block;		/* tcf block that owns the filters */
	u8  prio2band[TC_PRIO_MAX+1];		/* skb->priority -> band fallback map */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* one child qdisc per band, band 0 = highest prio */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
/* Pick the child qdisc that should receive @skb.
 *
 * Band selection:
 *   1. If skb->priority carries this qdisc's major handle, its minor
 *      number names the band directly (1-based).
 *   2. Otherwise the attached classifiers run; a verdict supplies the
 *      class id in res.classid.
 *   3. With no filters or a failed classification, fall back to the
 *      prio2band[] map indexed by skb->priority.
 *
 * Returns the selected child, or NULL (under CONFIG_NET_CLS_ACT) when
 * an action stole/queued/dropped the packet; *qerr then tells the
 * caller how to account for it.
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			/* No filters or no verdict: use the priomap. */
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;	/* class ids are 1-based */
	if (band >= q->bands)		/* u32 compare: minor 0 wraps and lands here too */
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
/* Enqueue @skb into the band chosen by prio_classify().
 *
 * On success the parent's backlog/qlen are bumped using the length
 * captured before handing the skb to the child.  Drop stats are only
 * incremented for return codes that count as real drops.
 */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);	/* read before the child may consume the skb */
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		/* Count as a drop only when the packet was not stolen/queued. */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) static struct sk_buff *prio_peek(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	for (prio = 0; prio < q->bands; prio++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		struct Qdisc *qdisc = q->queues[prio];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		struct sk_buff *skb = qdisc->ops->peek(qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 			return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static struct sk_buff *prio_dequeue(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	for (prio = 0; prio < q->bands; prio++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		struct Qdisc *qdisc = q->queues[prio];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 			qdisc_bstats_update(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 			qdisc_qstats_backlog_dec(sch, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 			sch->q.qlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 			return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) prio_reset(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	for (prio = 0; prio < q->bands; prio++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		qdisc_reset(q->queues[prio]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	sch->qstats.backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	sch->q.qlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	struct tc_prio_qopt_offload opt = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		.handle = sch->handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		.parent = sch->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	if (qopt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		opt.command = TC_PRIO_REPLACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		opt.replace_params.bands = qopt->bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		memcpy(&opt.replace_params.priomap, qopt->priomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 		       TC_PRIO_MAX + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		opt.replace_params.qstats = &sch->qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		opt.command = TC_PRIO_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) prio_destroy(struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	tcf_block_put(q->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	prio_offload(sch, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	for (prio = 0; prio < q->bands; prio++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		qdisc_put(q->queues[prio]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
/* Apply a new band count and priomap (both the init and change paths).
 *
 * Two-phase commit: any additional pfifo children are allocated first,
 * outside the tree lock, so a failure leaves the old configuration
 * untouched.  The switch-over then happens under sch_tree_lock();
 * children of removed bands are flushed under the lock but their
 * references are dropped only after it is released.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* Every priority must map to a band that will exist. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			/* Unwind only the children allocated by this call. */
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Bands being removed: purge their queued packets now. */
	for (i = q->bands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->queues[i]);

	/* Install the pre-allocated children for the new bands. */
	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);

	/* Drop the removed children outside the tree lock. */
	for (i = q->bands; i < oldbands; i++)
		qdisc_put(q->queues[i]);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) static int prio_init(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		     struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	return prio_tune(sch, opt, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
/* Ask an offloading driver to fold its hardware counters into this
 * qdisc's bstats/qstats before they are dumped to userspace.
 */
static int prio_dump_offload(struct Qdisc *sch)
{
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	struct tc_prio_qopt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	opt.bands = q->bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	err = prio_dump_offload(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
/* Attach @new as the child of band (@arg - 1), returning the previous
 * child through @old.  A NULL @new means "replace with a default
 * pfifo"; if even that allocation fails, the shared noop qdisc is used.
 * The hardware is notified after the software tree has been updated.
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	unsigned long band = arg - 1;	/* class ids are 1-based */

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg), extack);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &q->queues[band]);

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_PRIO, &graft_offload,
				   extack);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static struct Qdisc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) prio_leaf(struct Qdisc *sch, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	unsigned long band = arg - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	return q->queues[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static unsigned long prio_find(struct Qdisc *sch, u32 classid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	unsigned long band = TC_H_MIN(classid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	if (band - 1 >= q->bands)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	return band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
/* Filter bind hook: resolves the class the same way prio_find() does;
 * bands are plain array slots, so no extra reference is taken (see the
 * empty prio_unbind()).
 */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
/* Filter unbind hook: intentionally a no-op, bands hold no per-bind state. */
static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 			   struct tcmsg *tcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	tcm->tcm_handle |= TC_H_MIN(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	tcm->tcm_info = q->queues[cl-1]->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
/* Copy the basic (bytes/packets) and queue statistics of the class's
 * child qdisc into the dump.  Returns -1 if either copy fails.
 */
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];	/* class ids are 1-based band numbers */
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	if (arg->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	for (prio = 0; prio < q->bands; prio++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 		if (arg->count < arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 			arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 		if (arg->fn(sch, prio + 1, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 			arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 		arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 					struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	struct prio_sched_data *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	if (cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	return q->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
/* Class-level operations: each band is exposed as a class whose minor
 * id is (band + 1).
 */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
/* Top-level qdisc operations registered as the "prio" scheduler. */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,	/* runtime reconfiguration reuses the tune path */
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
/* Module entry: register the "prio" qdisc with the scheduler core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
/* Module exit: unregister the "prio" qdisc. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) module_init(prio_module_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) module_exit(prio_module_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) MODULE_LICENSE("GPL");