/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2 1996/04/11 17:51:52 guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1 1996/04/11 17:44:17 guru
 * Initial revision
 *
 * Revision 3.13 1996/01/21 15:17:18 alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12 1995/03/22 21:07:51 anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11 1995/01/19 23:14:31 guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10 1995/01/19 23:07:53 guru
 * back to
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued;
 *
 * Revision 3.9 1995/01/19 22:38:20 guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8 1995/01/19 22:30:55 guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7 1995/01/19 21:52:35 guru
 * printk's trimmed out.
 *
 * Revision 3.6 1995/01/19 21:49:56 guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5 1995/01/18 22:29:59 guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4 1995/01/18 21:59:47 guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3 1995/01/17 22:09:18 guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2 1995/01/15 16:46:06 guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1 1995/01/15 14:41:45 guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15 1995/01/15 14:29:02 guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14 1995/01/15 02:37:08 guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13 1995/01/15 02:36:31 guru
 * big changes
 *
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <linux/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

static void eql_timer(struct timer_list *t)
{
	equalizer_t *eql = from_timer(eql, t, timer);
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}
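
/*
 * An illustrative trace of the decay above (our numbers, not the
 * original author's): if EQL_DEFAULT_RESCHED_IVAL works out to roughly
 * one second, a slave configured for 28800 bits/sec has
 * priority_Bps = 3600, so a backlog of 5000 bytes is debited to 1400
 * on the next tick and clamped to 0 on the tick after that.  Slaves
 * whose device has dropped out of IFF_UP are reaped here as well.
 */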

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev = dev;

	dev->netdev_ops = &eql_netdev_ops;

	/*
	 * Now we undo some of the things that eth_setup does
	 * that we don't like
	 */

	dev->mtu = EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags = IFF_MASTER;

	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = 5;		/* Hands them off fast */
	netif_keep_dst(dev);
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

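/* queue->lock must be held */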
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 * The timer has to be stopped first before we start hacking away
	 * at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
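
/*
 * A minimal user-space sketch of driving this ioctl interface
 * (illustrative only, not part of the driver).  Enslaving a serial
 * device "sl0" under the master "eql" would look roughly like this,
 * assuming <sys/ioctl.h>, <net/if.h> and <linux/if_eql.h> are
 * included:
 *
 *	struct ifreq ifr;
 *	slaving_request_t srq;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(srq.slave_name, "sl0", IFNAMSIZ);
 *	srq.priority = 28800;			// line speed, bits/sec
 *	strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *	ifr.ifr_data = (char *)&srq;
 *	ioctl(fd, EQL_ENSLAVE, &ifr);		// needs CAP_NET_ADMIN
 *
 * EQL_EMANCIPATE takes the same slaving_request_t to detach a slave
 * again.
 */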

/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
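
/*
 * An illustrative walk through the load metric above (our numbers, not
 * the original author's).  The bias (~0UL - ~0UL / 2) is 2^63 on a
 * 64-bit kernel (2^31 on 32-bit) and keeps the subtraction of
 * priority_Bps from wrapping below zero, so effectively
 *
 *	slave_load = BIAS - priority_Bps + 8 * bytes_queued
 *
 * and the lowest load wins.  E.g. a 57600 bps slave (priority_Bps =
 * 7200) with 1000 bytes outstanding scores BIAS + 800, while an idle
 * 28800 bps slave (priority_Bps = 3600) scores BIAS - 3600 and is
 * chosen: an empty queue outweighs a faster line until the faster
 * line's backlog decays (see eql_timer).
 */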

static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}

/*
 * Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		dev_hold(slave->dev);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");