// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sch_plug.c	Queue traffic until an explicit release command
 *
 * There are two ways to use this qdisc:
 * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
 *    sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
 *
 * 2. For network output buffering (a.k.a. output commit) functionality.
 *    The output commit property is commonly used by applications that use
 *    checkpoint-based fault tolerance, to ensure that the checkpoint from
 *    which a system is restored is consistent with respect to the outside
 *    world.
 *
 *    Consider, for example, Remus - a Virtual Machine checkpointing system,
 *    wherein a VM is checkpointed, say, every 50ms. The checkpoint is
 *    replicated asynchronously to the backup host, while the VM continues
 *    executing the next epoch speculatively.
 *
 *    The following is a typical sequence of output buffer operations:
 *       1. At epoch i, start_buffer(i)
 *       2. At end of epoch i (i.e. after 50ms):
 *          2.1 Stop the VM and take checkpoint(i).
 *          2.2 start_buffer(i+1) and resume the VM
 *       3. While speculatively executing epoch(i+1), asynchronously replicate
 *          checkpoint(i) to the backup host.
 *       4. When checkpoint_ack(i) is received from the backup, release_buffer(i)
 *    Thus, this Qdisc would receive the following sequence of commands:
 *       TCQ_PLUG_BUFFER (epoch i)
 *       .. TCQ_PLUG_BUFFER (epoch i+1)
 *       ....TCQ_PLUG_RELEASE_ONE (epoch i)
 *       ......TCQ_PLUG_BUFFER (epoch i+2)
 *       ........
 */
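
/*
 * A minimal, hedged sketch (not part of this module) of how a checkpointing
 * controller might drive the sequence above from user space.  The helper
 * plug_ctrl() is purely hypothetical; in practice struct tc_plug_qopt
 * (include/uapi/linux/pkt_sched.h) is carried in the TCA_OPTIONS attribute
 * of an RTM_NEWQDISC rtnetlink message addressed to a qdisc of kind "plug":
 *
 *	struct tc_plug_qopt opt = { .action = TCQ_PLUG_BUFFER, .limit = 0 };
 *
 *	plug_ctrl(ifindex, &opt);                  start_buffer(i)
 *	   ... epoch i runs, checkpoint(i) is taken ...
 *	plug_ctrl(ifindex, &opt);                  start_buffer(i+1)
 *	   ... checkpoint(i) acknowledged by the backup ...
 *	opt.action = TCQ_PLUG_RELEASE_ONE;
 *	plug_ctrl(ifindex, &opt);                  release_buffer(i)
 */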

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/*
 * State of the queue, when used for network output buffering:
 *
 *                 plug(i+1)            plug(i)          head
 * ------------------+--------------------+---------------->
 *                   |                    |
 *                   |                    |
 * pkts_current_epoch| pkts_last_epoch    |pkts_to_release
 * ----------------->|<--------+--------->|+--------------->
 *                   v                    v
 *
 */

struct plug_sched_data {
	/* If true, the dequeue function releases all packets
	 * from head to end of the queue. The queue turns into
	 * a pass-through queue for newly arriving packets.
	 */
	bool unplug_indefinite;

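	/* If true, dequeue returns NULL: the qdisc is "plugged" and
	 * holds packets until the next release command arrives.
	 */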
	bool throttled;

	/* Queue Limit in bytes */
	u32 limit;

	/* Number of packets (output) from the current speculatively
	 * executing epoch.
	 */
	u32 pkts_current_epoch;

	/* Number of packets corresponding to the recently finished
	 * epoch. These will be released when we receive a
	 * TCQ_PLUG_RELEASE_ONE command. This command is typically
	 * issued after committing a checkpoint at the target.
	 */
	u32 pkts_last_epoch;

	/*
	 * Number of packets from the head of the queue, that can
	 * be released (committed checkpoint).
	 */
	u32 pkts_to_release;
};

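/* Enqueue: accept the packet as long as the byte backlog stays within
 * q->limit, counting it against the current epoch unless the qdisc is in
 * pass-through (unplug_indefinite) mode; otherwise drop it.
 */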
static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
		if (!q->unplug_indefinite)
			q->pkts_current_epoch++;
		return qdisc_enqueue_tail(skb, sch);
	}

	return qdisc_drop(skb, sch, to_free);
}

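/* Dequeue: while throttled, nothing leaves the queue.  In buffering mode a
 * packet is handed out only while pkts_to_release is non-zero; once the
 * released set is drained, the qdisc re-throttles itself until the next
 * release command.  In pass-through mode packets are dequeued freely.
 */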
static struct sk_buff *plug_dequeue(struct Qdisc *sch)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	if (q->throttled)
		return NULL;

	if (!q->unplug_indefinite) {
		if (!q->pkts_to_release) {
			/* No more packets to dequeue. Block the queue
			 * and wait for the next release command.
			 */
			q->throttled = true;
			return NULL;
		}
		q->pkts_to_release--;
	}

	return qdisc_dequeue_head(sch);
}

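/* Init: when no options are supplied, default the byte limit to
 * tx_queue_len * MTU of the underlying device; otherwise take the limit
 * from the supplied tc_plug_qopt.  The qdisc starts out throttled,
 * i.e. plugged.
 */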
static int plug_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	q->pkts_current_epoch = 0;
	q->pkts_last_epoch = 0;
	q->pkts_to_release = 0;
	q->unplug_indefinite = false;

	if (opt == NULL) {
		q->limit = qdisc_dev(sch)->tx_queue_len
			   * psched_mtu(qdisc_dev(sch));
	} else {
		struct tc_plug_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	}

	q->throttled = true;
	return 0;
}

/* Receives 4 types of messages:
 * TCQ_PLUG_BUFFER: Insert a plug into the queue and
 *  buffer any incoming packets
 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
 *   to beginning of the next plug.
 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
 *   Stop buffering packets until the next TCQ_PLUG_BUFFER
 *   command is received (just act as a pass-through queue).
 * TCQ_PLUG_LIMIT: Increase/decrease queue size
 */
static int plug_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct plug_sched_data *q = qdisc_priv(sch);
	struct tc_plug_qopt *msg;

	if (opt == NULL)
		return -EINVAL;

	msg = nla_data(opt);
	if (nla_len(opt) < sizeof(*msg))
		return -EINVAL;

	switch (msg->action) {
	case TCQ_PLUG_BUFFER:
		/* Save size of the current buffer */
		q->pkts_last_epoch = q->pkts_current_epoch;
		q->pkts_current_epoch = 0;
		if (q->unplug_indefinite)
			q->throttled = true;
		q->unplug_indefinite = false;
		break;
	case TCQ_PLUG_RELEASE_ONE:
		/* Add packets from the last complete buffer to the
		 * packets to be released set.
		 */
		q->pkts_to_release += q->pkts_last_epoch;
		q->pkts_last_epoch = 0;
		q->throttled = false;
		netif_schedule_queue(sch->dev_queue);
		break;
	case TCQ_PLUG_RELEASE_INDEFINITE:
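		/* Stop buffering altogether: discard the epoch counters,
		 * unthrottle, and let the queue act as pass-through until
		 * the next TCQ_PLUG_BUFFER command.
		 */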
		q->unplug_indefinite = true;
		q->pkts_to_release = 0;
		q->pkts_last_epoch = 0;
		q->pkts_current_epoch = 0;
		q->throttled = false;
		netif_schedule_queue(sch->dev_queue);
		break;
	case TCQ_PLUG_LIMIT:
		/* Limit is supplied in bytes */
		q->limit = msg->limit;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
	.id          =       "plug",
	.priv_size   =       sizeof(struct plug_sched_data),
	.enqueue     =       plug_enqueue,
	.dequeue     =       plug_dequeue,
	.peek        =       qdisc_peek_head,
	.init        =       plug_init,
	.change      =       plug_change,
	.reset       =       qdisc_reset_queue,
	.owner       =       THIS_MODULE,
};

static int __init plug_module_init(void)
{
	return register_qdisc(&plug_qdisc_ops);
}

static void __exit plug_module_exit(void)
{
	unregister_qdisc(&plug_qdisc_ops);
}
module_init(plug_module_init)
module_exit(plug_module_exit)
MODULE_LICENSE("GPL");