Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

Summary:
  If dev_has_header(dev) == false we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev->header_ops != NULL
   mac_header -> ll header
   data       -> ll header

dev->header_ops == NULL (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position,
   as the packet classifier depends on it.
 */
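
/* A minimal userspace sketch (illustration only, not part of the kernel
 * build) of how the SOCK_RAW/SOCK_DGRAM split above looks from the other
 * side of the API; constants come from <linux/if_packet.h> and
 * <linux/if_ether.h>:
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgr = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	// recv() on 'raw' returns the frame starting at the ll header;
 *	// recv() on 'dgr' returns it with the ll header already pulled,
 *	// matching the "pulled ll header" assumption above.
 */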

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
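
/* A minimal userspace sketch (illustration only): the uapi-visible
 * struct packet_mreq carries an 8-byte mr_address, and packet_mreq_max
 * above is the kernel-side superset sized for MAX_ADDR_LEN. A typical
 * membership request looks like:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */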

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}
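
/* po->xmit points at dev_queue_xmit() by default; the PACKET_QDISC_BYPASS
 * socket option switches it to packet_direct_xmit() above, skipping the
 * qdisc layer. A minimal userspace sketch (illustration only):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */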

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
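	/* Worked example of that limit: a 32-bit unsigned seconds count
	 * wraps after 2^32 s, roughly 136 years, so an epoch of 1970
	 * overflows in the year 2106.
	 */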
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is so slow, you don't really
	 * need to worry about perf anyway.
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
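
/* Worked example (following the arithmetic above): a 1 MiB block on a
 * 1 Gb/s link gives div = 1000 / 1000 = 1 and
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, so the derived timeout is
 * 8 + 1 = 9 msec, i.e. roughly the time needed to fill one block.
 */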

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
			       /* Case 2. Queue was frozen, user-space caught up,
				* now the link went idle && the timer fired.
				* We don't have a block to close, so we open this
				* block and restart the timer.
				* Opening a block thaws the queue and restarts the
				* timer; thawing/timer-refresh is a side effect.
				*/
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 *	Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static void prb_close_block(struct tpacket_kbdq_core *pkc1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		struct tpacket_block_desc *pbd1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		struct packet_sock *po, unsigned int stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	__u32 status = TP_STATUS_USER | stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct tpacket3_hdr *last_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	struct sock *sk = &po->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (atomic_read(&po->tp_drops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		status |= TP_STATUS_LOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	last_pkt->tp_next_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	/* Get the ts of the last pkt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (BLOCK_NUM_PKTS(pbd1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		/* Ok, we timed out (TMO), so get the current time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		 * It shouldn't really happen as we don't close empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		 * blocks. See prb_retire_rx_blk_timer_expired().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		ktime_get_real_ts64(&ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/* Flush the block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	prb_flush_block(pkc1, pbd1, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	pkc->reset_pending_on_curr_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * Side effects of opening a block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  * 1) prb_queue is thawed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  * 2) retire_blk_timer is refreshed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static void prb_open_block(struct tpacket_kbdq_core *pkc1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	struct tpacket_block_desc *pbd1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/* We could have just memset this, but then we would lose the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * flexibility of making the priv area sticky.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	BLOCK_NUM_PKTS(pbd1) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	ktime_get_real_ts64(&ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	pkc1->pkblk_start = (char *)pbd1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	pbd1->version = pkc1->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	pkc1->prev = pkc1->nxt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	prb_thaw_queue(pkc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	_prb_refresh_rx_retire_blk_timer(pkc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
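/*
 * Resulting block layout after prb_open_block() (illustrative sketch):
 *
 *	pkblk_start ->	+----------------------+
 *			|     block header     |  BLK_HDR_LEN bytes
 *	O2PRIV ->	+----------------------+
 *			|    user priv area    |  blk_sizeof_priv bytes
 *	O2FP ->		+----------------------+  <- nxt_offset
 *			|     packets ...      |  see prb_fill_curr_block()
 *	pkblk_end ->	+----------------------+
 */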
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * Queue freeze logic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * 1) Assume tp_block_nr = 8 blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * 2) At time 't0', user opens Rx ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * 4) user-space is either sleeping or processing block '0'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  *    it will close block-7, loop around and try to fill block '0'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  *    call-flow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  *    __packet_lookup_frame_in_block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  *      prb_retire_current_block()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  *      prb_dispatch_next_block()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  *        |->(BLOCK_STATUS == USER) evaluates to true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  *    5.1) Since block-0 is currently in use, we just freeze the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * 6) Now there are two cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  *    6.1) Link goes idle right after the queue is frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  *         But remember, the last open_block() refreshed the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *         When this timer expires, it will refresh itself so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  *         re-open block-0 in the near future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  *    6.2) Link is busy and keeps on receiving packets. This is a simple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  *         case and __packet_lookup_frame_in_block will check if block-0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  *         is free and can now be re-used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				  struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	pkc->reset_pending_on_curr_blk = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	po->stats.stats3.tp_freeze_q_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
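/*
 * Illustrative user-space sketch (not part of this file): the consumer
 * thaws a frozen queue simply by handing the block back.  Once
 * block_status is TP_STATUS_KERNEL again, the next
 * __packet_lookup_frame_in_block() will re-open the block:
 *
 *	struct tpacket_block_desc *pbd;		(points into the mmap()ed ring)
 *	...consume pbd->hdr.bh1.num_pkts packets via tp_next_offset...
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 */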
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
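/* e.g. with V3_ALIGNMENT == 8, a 61-byte packet consumes ALIGN(61, 8) == 64 bytes */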
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * If the next block is free, then we will dispatch it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * and return a good offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * Else, we will freeze the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  * So, the caller must check the return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct tpacket_block_desc *pbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* 1. Get current block num */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	/* 2. If this block is currently in use, then freeze the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		prb_freeze_queue(pkc, po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 * 3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 * Open this block and return the offset where the first packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 * needs to be stored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	prb_open_block(pkc, pbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	return (void *)pkc->nxt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		struct packet_sock *po, unsigned int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	/* retire/close the current block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		 * Plug the case where skb_copy_bits() is in progress on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		 * cpu-0, tpacket_rcv() got invoked on cpu-1, didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		 * have space to copy the pkt in the current block and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		 * called prb_retire_current_block().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		 * We don't need to worry about the TMO case because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * the timer handler has already handled it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (!(status & TP_STATUS_BLK_TMO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			/* Waiting for skb_copy_bits to finish... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			write_lock(&pkc->blk_fill_in_prog_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			write_unlock(&pkc->blk_fill_in_prog_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		prb_close_block(pkc, pbd, po, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	return pkc->reset_pending_on_curr_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	__releases(&pkc->blk_fill_in_prog_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	read_unlock(&pkc->blk_fill_in_prog_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			struct tpacket3_hdr *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			struct tpacket3_hdr *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	ppd->hv1.tp_rxhash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			struct tpacket3_hdr *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (skb_vlan_tag_present(pkc->skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		ppd->hv1.tp_vlan_tci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		ppd->hv1.tp_vlan_tpid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		ppd->tp_status = TP_STATUS_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			struct tpacket3_hdr *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	ppd->hv1.tp_padding = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	prb_fill_vlan_info(pkc, ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		prb_fill_rxhash(pkc, ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		prb_clear_rxhash(pkc, ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) static void prb_fill_curr_block(char *curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 				struct tpacket_kbdq_core *pkc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 				struct tpacket_block_desc *pbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	__acquires(&pkc->blk_fill_in_prog_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct tpacket3_hdr *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	ppd  = (struct tpacket3_hdr *)curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	pkc->prev = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	BLOCK_NUM_PKTS(pbd) += 1;
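	/*
	 * Taken for reading here and released by prb_clear_blk_fill_status()
	 * once the skb payload has been copied in; prb_retire_current_block()
	 * briefly takes the write side to wait out in-flight fills before
	 * closing a block.
	 */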
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	read_lock(&pkc->blk_fill_in_prog_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	prb_run_all_ft_ops(pkc, ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Assumes the caller holds sk->rx_queue.lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static void *__packet_lookup_frame_in_block(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 					    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 					    unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct tpacket_kbdq_core *pkc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct tpacket_block_desc *pbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	char *curr, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* Queue is frozen when user space is lagging behind */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (prb_queue_frozen(pkc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		 * Check if the last block, which caused the queue to freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		 * is still in use by user-space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		if (prb_curr_blk_in_use(pbd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			/* Can't record this packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			 * Ok, the block was released by user-space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			 * Now let's open that block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			 * Opening a block also thaws the queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			 * thawing is a side effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			prb_open_block(pkc, pbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	curr = pkc->nxt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	pkc->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	end = (char *)pbd + pkc->kblk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	/* First try the current block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		prb_fill_curr_block(curr, pkc, pbd, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		return (void *)curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/* Ok, close the current block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	prb_retire_current_block(pkc, po, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/* Now, try to dispatch the next block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	curr = (char *)prb_dispatch_next_block(pkc, po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (curr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		prb_fill_curr_block(curr, pkc, pbd, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return (void *)curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	 * No free blocks are available. User-space hasn't caught up yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	 * The queue was just frozen, and now this packet will get dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void *packet_current_rx_frame(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 					    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 					    int status, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	char *curr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		curr = packet_lookup_frame(po, &po->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 					po->rx_ring.head, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		return __packet_lookup_frame_in_block(po, skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		WARN(1, "TPACKET version not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static void *prb_lookup_block(const struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			      const struct packet_ring_buffer *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			      unsigned int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			      int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (status != BLOCK_STATUS(pbd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	return pbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int prb_previous_blk_num(struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	unsigned int prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (rb->prb_bdqc.kactive_blk_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		prev = rb->prb_bdqc.kactive_blk_num-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		prev = rb->prb_bdqc.knum_blocks-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* Assumes the caller holds the rx_queue.lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void *__prb_previous_block(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 					 struct packet_ring_buffer *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 					 int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	unsigned int previous = prb_previous_blk_num(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	return prb_lookup_block(po, rb, previous, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void *packet_previous_rx_frame(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 					     struct packet_ring_buffer *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 					     int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (po->tp_version <= TPACKET_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		return packet_previous_frame(po, rb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	return __prb_previous_block(po, rb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static void packet_increment_rx_head(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 					    struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		return packet_increment_head(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		WARN(1, "TPACKET version not supported.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void *packet_previous_frame(struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		struct packet_ring_buffer *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	return packet_lookup_frame(po, rb, previous, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static void packet_increment_head(struct packet_ring_buffer *buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void packet_inc_pending(struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	this_cpu_inc(*rb->pending_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void packet_dec_pending(struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	this_cpu_dec(*rb->pending_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	unsigned int refcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	/* We don't use pending refcount in rx_ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (rb->pending_refcnt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	return refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static int packet_alloc_pending(struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	po->rx_ring.pending_refcnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static void packet_free_pending(struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	free_percpu(po->tx_ring.pending_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #define ROOM_POW_OFF	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) #define ROOM_NONE	0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) #define ROOM_LOW	0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #define ROOM_NORMAL	0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
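/*
 * Both has_room variants below probe the slot ROOM_POW_OFF (a quarter
 * of the ring) ahead of the current head; frames/blocks are consumed
 * in order, so that slot still being TP_STATUS_KERNEL suggests roughly
 * a quarter of the ring remains kernel-owned.  With pow_off == 0 the
 * head slot itself is probed.
 */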
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	int idx, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	len = READ_ONCE(po->rx_ring.frame_max) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	idx = READ_ONCE(po->rx_ring.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (pow_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		idx += len >> pow_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (idx >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		idx -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	int idx, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (pow_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		idx += len >> pow_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (idx >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		idx -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int __packet_rcv_has_room(const struct packet_sock *po,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 				 const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	const struct sock *sk = &po->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	int ret = ROOM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (po->prot_hook.func != tpacket_rcv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				   - (skb ? skb->truesize : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		if (avail > (rcvbuf >> ROOM_POW_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			return ROOM_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		else if (avail > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			return ROOM_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			return ROOM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (po->tp_version == TPACKET_V3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			ret = ROOM_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		else if (__tpacket_v3_has_room(po, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			ret = ROOM_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		if (__tpacket_has_room(po, ROOM_POW_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			ret = ROOM_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		else if (__tpacket_has_room(po, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			ret = ROOM_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	int pressure, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	ret = __packet_rcv_has_room(po, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	pressure = ret != ROOM_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	if (READ_ONCE(po->pressure) != pressure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		WRITE_ONCE(po->pressure, pressure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static void packet_rcv_try_clear_pressure(struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (READ_ONCE(po->pressure) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		WRITE_ONCE(po->pressure,  0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void packet_sock_destruct(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	skb_queue_purge(&sk->sk_error_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (!sock_flag(sk, SOCK_DEAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		pr_err("Attempt to release alive packet socket: %p\n", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	sk_refcnt_debug_dec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
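/*
 * A flow counts as "huge" when its rxhash already fills more than half
 * of the ROLLOVER_HLEN-entry history.  Under ROOM_LOW, rollover keeps
 * small flows on their original socket but migrates huge ones (see
 * fanout_demux_rollover()).
 */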
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	u32 *history = po->rollover->history;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	u32 victim, rxhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	int i, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	rxhash = skb_get_hash(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	for (i = 0; i < ROLLOVER_HLEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		if (READ_ONCE(history[i]) == rxhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	victim = prandom_u32() % ROLLOVER_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* Avoid dirtying the cache line if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (READ_ONCE(history[victim]) != rxhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		WRITE_ONCE(history[victim], rxhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	return count > (ROLLOVER_HLEN >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static unsigned int fanout_demux_hash(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				      struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				      unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static unsigned int fanout_demux_lb(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 				    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				    unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	unsigned int val = atomic_inc_return(&f->rr_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return val % num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static unsigned int fanout_demux_cpu(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 				     struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				     unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	return smp_processor_id() % num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static unsigned int fanout_demux_rnd(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 				     struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 				     unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	return prandom_u32_max(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static unsigned int fanout_demux_rollover(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 					  struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 					  unsigned int idx, bool try_self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 					  unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	struct packet_sock *po, *po_next, *po_skip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	unsigned int i, j, room = ROOM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	po = pkt_sk(rcu_dereference(f->arr[idx]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (try_self) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		room = packet_rcv_has_room(po, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (room == ROOM_NORMAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		po_skip = po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	i = j = min_t(int, po->rollover->sock, num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		po_next = pkt_sk(rcu_dereference(f->arr[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			if (i != j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				po->rollover->sock = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			atomic_long_inc(&po->rollover->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			if (room == ROOM_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 				atomic_long_inc(&po->rollover->num_huge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		if (++i == num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	} while (i != j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	atomic_long_inc(&po->rollover->num_failed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static unsigned int fanout_demux_qm(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				    unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	return skb_get_queue_mapping(skb) % num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static unsigned int fanout_demux_bpf(struct packet_fanout *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				     struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				     unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	prog = rcu_dereference(f->bpf_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		ret = bpf_prog_run_clear_cb(prog, skb) % num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
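/*
 * Fanout flags live in the high byte of the PACKET_FANOUT type_flags
 * word (e.g. PACKET_FANOUT_FLAG_DEFRAG == 0x8000), while f->flags
 * stores that byte already shifted down, hence the (flag >> 8) below.
 */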
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	return f->flags & (flag >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			     struct packet_type *pt, struct net_device *orig_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	struct packet_fanout *f = pt->af_packet_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	unsigned int num = READ_ONCE(f->num_members);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	struct net *net = read_pnet(&f->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct packet_sock *po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (!net_eq(dev_net(dev), net) || !num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	switch (f->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	case PACKET_FANOUT_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		idx = fanout_demux_hash(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	case PACKET_FANOUT_LB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		idx = fanout_demux_lb(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	case PACKET_FANOUT_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		idx = fanout_demux_cpu(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	case PACKET_FANOUT_RND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		idx = fanout_demux_rnd(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	case PACKET_FANOUT_QM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		idx = fanout_demux_qm(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	case PACKET_FANOUT_ROLLOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		idx = fanout_demux_rollover(f, skb, 0, false, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	case PACKET_FANOUT_CBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	case PACKET_FANOUT_EBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		idx = fanout_demux_bpf(f, skb, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		idx = fanout_demux_rollover(f, skb, idx, true, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	po = pkt_sk(rcu_dereference(f->arr[idx]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
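/*
 * Illustrative user-space sketch (not part of this file): sockets join
 * a fanout group with the PACKET_FANOUT socket option, packing the
 * group id into the low 16 bits and the algorithm plus flags into the
 * high 16 bits, as described in packet(7):
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */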
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) DEFINE_MUTEX(fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) EXPORT_SYMBOL_GPL(fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static LIST_HEAD(fanout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static u16 fanout_next_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static void __fanout_link(struct sock *sk, struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct packet_fanout *f = po->fanout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	spin_lock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	rcu_assign_pointer(f->arr[f->num_members], sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	f->num_members++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (f->num_members == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		dev_add_pack(&f->prot_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	spin_unlock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	struct packet_fanout *f = po->fanout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	spin_lock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	for (i = 0; i < f->num_members; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		if (rcu_dereference_protected(f->arr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 					      lockdep_is_held(&f->lock)) == sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	BUG_ON(i >= f->num_members);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	rcu_assign_pointer(f->arr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			   rcu_dereference_protected(f->arr[f->num_members - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 						     lockdep_is_held(&f->lock)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	f->num_members--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (f->num_members == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		__dev_remove_pack(&f->prot_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	spin_unlock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (sk->sk_family != PF_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static void fanout_init_data(struct packet_fanout *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	switch (f->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	case PACKET_FANOUT_LB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		atomic_set(&f->rr_cur, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	case PACKET_FANOUT_CBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	case PACKET_FANOUT_EBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		RCU_INIT_POINTER(f->bpf_prog, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct bpf_prog *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	spin_lock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	rcu_assign_pointer(f->bpf_prog, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	spin_unlock(&f->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		bpf_prog_destroy(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 				unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	struct bpf_prog *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct sock_fprog fprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	ret = copy_bpf_fprog_from_user(&fprog, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	__fanout_set_data_bpf(po->fanout, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	struct bpf_prog *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	u32 fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if (len != sizeof(fd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	if (copy_from_sockptr(&fd, data, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (IS_ERR(new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		return PTR_ERR(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	__fanout_set_data_bpf(po->fanout, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static int fanout_set_data(struct packet_sock *po, sockptr_t data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			   unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	switch (po->fanout->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	case PACKET_FANOUT_CBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		return fanout_set_data_cbpf(po, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	case PACKET_FANOUT_EBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		return fanout_set_data_ebpf(po, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
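/*
 * Userspace side of the dispatcher above: once a socket is in a
 * PACKET_FANOUT_CBPF group, the demux program is installed with
 * setsockopt(PACKET_FANOUT_DATA). The classic-BPF program's return
 * value, taken modulo the member count, picks the receiving socket.
 * A minimal sketch, assuming fd already joined a CBPF fanout group:
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_packet.h>

static int install_fanout_cbpf(int fd)
{
	/* steer by the IPv4 protocol byte (offset 23 of an Ethernet frame) */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
			  &fprog, sizeof(fprog));
}
#endif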
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static void fanout_release_data(struct packet_fanout *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	switch (f->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	case PACKET_FANOUT_CBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	case PACKET_FANOUT_EBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		__fanout_set_data_bpf(f, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	struct packet_fanout *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	list_for_each_entry(f, &fanout_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		if (f->id == candidate_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		    read_pnet(&f->net) == sock_net(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	u16 id = fanout_next_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		if (__fanout_id_is_free(sk, id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			*new_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			fanout_next_id = id + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	} while (id != fanout_next_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static int fanout_add(struct sock *sk, struct fanout_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct packet_rollover *rollover = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	u16 type_flags = args->type_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	struct packet_fanout *f, *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	u8 type = type_flags & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	u8 flags = type_flags >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	u16 id = args->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	case PACKET_FANOUT_ROLLOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	case PACKET_FANOUT_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	case PACKET_FANOUT_LB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	case PACKET_FANOUT_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	case PACKET_FANOUT_RND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	case PACKET_FANOUT_QM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	case PACKET_FANOUT_CBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	case PACKET_FANOUT_EBPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	mutex_lock(&fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	if (po->fanout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if (type == PACKET_FANOUT_ROLLOVER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		if (!rollover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		atomic_long_set(&rollover->num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		atomic_long_set(&rollover->num_huge, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		atomic_long_set(&rollover->num_failed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		if (id != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (!fanout_find_new_id(sk, &id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		/* ephemeral flag for the first socket in the group: drop it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	match = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	list_for_each_entry(f, &fanout_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		if (f->id == id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		    read_pnet(&f->net) == sock_net(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			match = f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		if (match->flags != flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		if (args->max_num_members &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		    args->max_num_members != match->max_num_members)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		if (args->max_num_members > PACKET_FANOUT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		if (!args->max_num_members)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			/* legacy PACKET_FANOUT_MAX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			args->max_num_members = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		match = kvzalloc(struct_size(match, arr, args->max_num_members),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		write_pnet(&match->net, sock_net(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		match->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		match->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		match->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		INIT_LIST_HEAD(&match->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		spin_lock_init(&match->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		refcount_set(&match->sk_ref, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		fanout_init_data(match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		match->prot_hook.type = po->prot_hook.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		match->prot_hook.dev = po->prot_hook.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		match->prot_hook.func = packet_rcv_fanout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		match->prot_hook.af_packet_priv = match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		match->prot_hook.af_packet_net = read_pnet(&match->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		match->prot_hook.id_match = match_fanout_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		match->max_num_members = args->max_num_members;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		list_add(&match->list, &fanout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (po->running &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	    match->type == type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	    match->prot_hook.type == po->prot_hook.type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	    match->prot_hook.dev == po->prot_hook.dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		if (refcount_read(&match->sk_ref) < match->max_num_members) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			__dev_remove_pack(&po->prot_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			WRITE_ONCE(po->fanout, match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			po->rollover = rollover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			rollover = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			__fanout_link(sk, po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	if (err && !refcount_read(&match->sk_ref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		list_del(&match->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		kvfree(match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	kfree(rollover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	mutex_unlock(&fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
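/*
 * Userspace view of fanout_add(): each member socket joins a group via
 * setsockopt(PACKET_FANOUT); the low 16 bits of the integer argument
 * carry the group id and the high 16 bits the mode and flags. Note the
 * po->running check above: the socket must be bound first. A minimal
 * sketch (needs CAP_NET_RAW; the interface name and group id passed in
 * are placeholders):
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <string.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int join_fanout(const char *ifname, int group_id)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll sll;
	int arg;

	if (fd < 0)
		return -1;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex	 = if_nametoindex(ifname);
	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
		return -1;

	arg = (group_id & 0xffff) | (PACKET_FANOUT_HASH << 16);
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)) < 0)
		return -1;
	return fd;
}
#endif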
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * It is the responsibility of the caller to call fanout_release_data() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * free the returned packet_fanout (after synchronize_net())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static struct packet_fanout *fanout_release(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	struct packet_fanout *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	mutex_lock(&fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	f = po->fanout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	if (f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		po->fanout = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		if (refcount_dec_and_test(&f->sk_ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			list_del(&f->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			f = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	mutex_unlock(&fanout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 					  struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	/* Earlier code assumed this would be a VLAN packet; double-check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * this now that we have the actual packet in hand. We can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 * do this check on Ethernet devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	if (unlikely(dev->type != ARPHRD_ETHER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static const struct proto_ops packet_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static const struct proto_ops packet_ops_spkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 			   struct packet_type *pt, struct net_device *orig_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	struct sockaddr_pkt *spkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	 *	When we registered the protocol we saved the socket in the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	 *	field for just this event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	sk = pt->af_packet_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	 *	Yank back the headers [and hope the device set this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	 *	up right, or kaboom...]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	 *	Incoming packets have the link-layer header pulled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	 *	push it back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	 *	For outgoing packets skb->data == skb_mac_header(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	 *	so this procedure is a no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (skb->pkt_type == PACKET_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	if (!net_eq(dev_net(dev), sock_net(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	skb = skb_share_check(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		goto oom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	/* drop any routing info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	/* drop conntrack reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	nf_reset_ct(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	skb_push(skb, skb->data - skb_mac_header(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	 *	The SOCK_PACKET socket receives _all_ frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	spkt->spkt_family = dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	spkt->spkt_protocol = skb->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	 *	Charge the memory to the socket. This is done specifically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 *	to prevent a single socket from using up all the memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (sock_queue_rcv_skb(sk, skb) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) oom:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
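/*
 * packet_rcv_spkt() fills in the sockaddr_pkt that an obsolete
 * SOCK_PACKET receiver sees. A minimal sketch of such a receiver
 * (SOCK_PACKET is deprecated; new code should use SOCK_RAW/SOCK_DGRAM
 * with sockaddr_ll instead):
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static void spkt_recv_once(void)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt;
	socklen_t alen = sizeof(spkt);
	char frame[ETH_FRAME_LEN];

	/* spkt.spkt_device names the ingress interface, spkt.spkt_family
	 * holds the ARPHRD_* device type and spkt.spkt_protocol the
	 * ethertype, exactly as set by the function above.
	 */
	recvfrom(fd, frame, sizeof(frame), 0,
		 (struct sockaddr *)&spkt, &alen);
}
#endif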
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	    sock->type == SOCK_RAW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		skb->protocol = dev_parse_header_protocol(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	skb_probe_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  *	Output a raw packet to a device layer. This bypasses all the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  *	protocol layers and you must therefore supply it with a complete frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			       size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct sockcm_cookie sockc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	__be16 proto = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	int extra_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	 *	Get and verify the address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (saddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (msg->msg_namelen < sizeof(struct sockaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			proto = saddr->spkt_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	 *	Find the device first, so we can size-check against it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	if (dev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	err = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	if (!(dev->flags & IFF_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	 * You may not queue a frame bigger than the MTU. This is the lowest level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 * raw protocol and you must do your own fragmentation at this level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		if (!netif_supports_nofcs(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			err = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		extra_len = 4; /* We're doing our own CRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		size_t reserved = LL_RESERVED_SPACE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		int tlen = dev->needed_tailroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		/* FIXME: Save some space for broken drivers that write a hard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		 * header at transmission time by themselves. PPP is the notable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		 * one here. This should really be fixed at the driver level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		skb_reserve(skb, reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		/* Try to align data part correctly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		if (hhlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			skb->data -= hhlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			skb->tail -= hhlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			if (len < hhlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 				skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	if (!dev_validate_header(dev, skb->data, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	    !packet_extra_vlan_len_allowed(dev, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	sockcm_init(&sockc, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	if (msg->msg_controllen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		err = sock_cmsg_send(sk, msg, &sockc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	skb->protocol = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	skb->priority = sk->sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	skb->mark = sk->sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	skb->tstamp = sockc.transmit_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	skb_setup_tx_timestamp(skb, sockc.tsflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	if (unlikely(extra_len == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		skb->no_fcs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	packet_parse_headers(skb, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
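/*
 * Userspace counterpart of packet_sendmsg_spkt(): the caller names the
 * device in msg_name and supplies a complete link-layer frame; note in
 * the code above that spkt_protocol is only honoured when msg_namelen
 * is exactly sizeof(struct sockaddr_pkt). A minimal sketch ("eth0" is
 * a placeholder, and frame must already hold the Ethernet header):
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <string.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int spkt_send(const unsigned char *frame, size_t len)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	strncpy((char *)spkt.spkt_device, "eth0",
		sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);

	/* no fragmentation is done for us at this level, so len must
	 * fit within the device MTU (see the -EMSGSIZE checks above)
	 */
	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif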
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static unsigned int run_filter(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			       const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			       unsigned int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	struct sk_filter *filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	filter = rcu_dereference(sk->sk_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	if (filter != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		res = bpf_prog_run_clear_cb(filter->prog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
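/*
 * run_filter() executes whatever classic-BPF program userspace attached
 * with SO_ATTACH_FILTER: a return value of 0 drops the packet, and any
 * smaller non-zero value truncates the captured snaplen. A minimal
 * sketch that keeps only ARP frames, untruncated:
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static int attach_arp_only_filter(int fd)
{
	struct sock_filter insns[] = {
		/* load the ethertype (bytes 12-13 of the frame) */
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
		/* if it is not ARP, jump to the drop instruction */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept whole packet */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}
#endif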
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			   size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	struct virtio_net_hdr vnet_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	if (*len < sizeof(vnet_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	*len -= sizeof(vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
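/*
 * packet_rcv_vnet() is what prefixes every received packet with a
 * struct virtio_net_hdr once PACKET_VNET_HDR is enabled on a SOCK_RAW
 * socket, exposing GSO/checksum metadata to userspace. A minimal
 * receive-side sketch (fd is assumed to be a bound AF_PACKET socket):
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>

static void vnet_recv_once(int fd)
{
	int one = 1;
	char buf[sizeof(struct virtio_net_hdr) + 65536];
	struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
	ssize_t n;

	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one));

	n = recv(fd, buf, sizeof(buf), 0);
	if (n >= (ssize_t)sizeof(*vh)) {
		/* vh->gso_type, vh->csum_start etc. describe the frame
		 * that begins at buf + sizeof(*vh)
		 */
	}
}
#endif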
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)  * This function performs lazy skb cloning in the hope that most packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)  * are discarded by BPF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)  * and skb->cb are mangled. It works because (and as long as) packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)  * arriving here are owned by the current CPU. Output packets are cloned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)  * by dev_queue_xmit_nit() and input packets are processed by net_bh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)  * sequentially, so if we return the skb to its original state on exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)  * we will not harm anyone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		      struct packet_type *pt, struct net_device *orig_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	struct sockaddr_ll *sll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct packet_sock *po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	u8 *skb_head = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	int skb_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	unsigned int snaplen, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	bool is_drop_n_account = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (skb->pkt_type == PACKET_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	sk = pt->af_packet_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	if (!net_eq(dev_net(dev), sock_net(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	if (dev_has_header(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		/* The device has an explicit notion of ll header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		 * exported to higher levels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		 * Otherwise, the device hides the details of its frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		 * structure, and the corresponding packet header is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		 * never delivered to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		if (sk->sk_type != SOCK_DGRAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			skb_push(skb, skb->data - skb_mac_header(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		else if (skb->pkt_type == PACKET_OUTGOING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			/* Special case: outgoing packets have ll header at head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 			skb_pull(skb, skb_network_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	snaplen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	res = run_filter(skb, sk, snaplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		goto drop_n_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	if (snaplen > res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		snaplen = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		goto drop_n_acct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	if (skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		if (nskb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			goto drop_n_acct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		if (skb_head != skb->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			skb->data = skb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			skb->len = skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		skb = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	sll = &PACKET_SKB_CB(skb)->sa.ll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	sll->sll_hatype = dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	sll->sll_pkttype = skb->pkt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	if (unlikely(po->origdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		sll->sll_ifindex = orig_dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		sll->sll_ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	 * Use their space for storing the original skb length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	if (pskb_trim(skb, snaplen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		goto drop_n_acct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	skb_set_owner_r(skb, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	skb->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	/* drop conntrack reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	nf_reset_ct(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	spin_lock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	po->stats.stats1.tp_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	sock_skb_set_dropcount(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	__skb_queue_tail(&sk->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	spin_unlock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) drop_n_acct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	is_drop_n_account = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	atomic_inc(&po->tp_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	atomic_inc(&sk->sk_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) drop_n_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (skb_head != skb->data && skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		skb->data = skb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		skb->len = skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	if (!is_drop_n_account)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
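/*
 * The sockaddr_ll that packet_rcv() prepares in the skb control block
 * is what recvfrom() hands back as the source address on a non-mmap
 * AF_PACKET socket. A minimal consumer sketch (fd is assumed to be a
 * bound AF_PACKET socket):
 */
#if 0	/* userspace usage sketch, not kernel code */
#include <sys/socket.h>
#include <stdio.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static void packet_recv_once(int fd)
{
	struct sockaddr_ll sll;
	socklen_t alen = sizeof(sll);
	char frame[ETH_FRAME_LEN];
	ssize_t n;

	n = recvfrom(fd, frame, sizeof(frame), 0,
		     (struct sockaddr *)&sll, &alen);
	if (n < 0)
		return;

	/* fields filled in above: ingress ifindex, ARPHRD_* type,
	 * packet direction (PACKET_HOST/BROADCAST/OUTGOING/...) and
	 * the link-layer source address of length sll_halen
	 */
	printf("if=%d hatype=%u pkttype=%u halen=%u\n",
	       sll.sll_ifindex, sll.sll_hatype, sll.sll_pkttype,
	       sll.sll_halen);
}
#endif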
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		       struct packet_type *pt, struct net_device *orig_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	struct packet_sock *po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	struct sockaddr_ll *sll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	union tpacket_uhdr h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	u8 *skb_head = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	int skb_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	unsigned int snaplen, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	unsigned long status = TP_STATUS_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	unsigned short macoff, hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	unsigned int netoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	struct sk_buff *copy_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	__u32 ts_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	bool is_drop_n_account = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	unsigned int slot_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	bool do_vnet = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	 * We may add members to them up to the current aligned size without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	if (skb->pkt_type == PACKET_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	sk = pt->af_packet_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	if (!net_eq(dev_net(dev), sock_net(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	if (dev_has_header(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		if (sk->sk_type != SOCK_DGRAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 			skb_push(skb, skb->data - skb_mac_header(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		else if (skb->pkt_type == PACKET_OUTGOING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			/* Special case: outgoing packets have ll header at head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			skb_pull(skb, skb_network_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	snaplen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	res = run_filter(skb, sk, snaplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		goto drop_n_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	/* If we are flooded, just give up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		atomic_inc(&po->tp_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		goto drop_n_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		status |= TP_STATUS_CSUMNOTREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	else if (skb->pkt_type != PACKET_OUTGOING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		  skb_csum_unnecessary(skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		status |= TP_STATUS_CSUM_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (snaplen > res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		snaplen = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	if (sk->sk_type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 				  po->tp_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		unsigned int maclen = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 				       (maclen < 16 ? 16 : maclen)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 				       po->tp_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		if (po->has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 			netoff += sizeof(struct virtio_net_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			do_vnet = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		macoff = netoff - maclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	if (netoff > USHRT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		atomic_inc(&po->tp_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		goto drop_n_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	if (po->tp_version <= TPACKET_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		if (macoff + snaplen > po->rx_ring.frame_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			if (po->copy_thresh &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 				if (skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 					copy_skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 					copy_skb = skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 					skb_head = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 				if (copy_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 					skb_set_owner_r(copy_skb, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 			snaplen = po->rx_ring.frame_size - macoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 			if ((int)snaplen < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 				snaplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 				do_vnet = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	} else if (unlikely(macoff + snaplen >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		u32 nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			    snaplen, nval, macoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		snaplen = nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		if (unlikely((int)snaplen < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 			snaplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 			do_vnet = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	spin_lock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	h.raw = packet_current_rx_frame(po, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 					TP_STATUS_KERNEL, (macoff+snaplen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	if (!h.raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		goto drop_n_account;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	if (po->tp_version <= TPACKET_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		slot_id = po->rx_ring.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			goto drop_n_account;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		__set_bit(slot_id, po->rx_ring.rx_owner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	if (do_vnet &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 				    sizeof(struct virtio_net_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 				    vio_le(), true, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		if (po->tp_version == TPACKET_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 			prb_clear_blk_fill_status(&po->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		goto drop_n_account;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	if (po->tp_version <= TPACKET_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		packet_increment_rx_head(po, &po->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		 * LOSING will be reported until you read the stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		 * because it's COR - Clear On Read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		 * at the packet level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		if (atomic_read(&po->tp_drops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			status |= TP_STATUS_LOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	po->stats.stats1.tp_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	if (copy_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		status |= TP_STATUS_COPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	spin_unlock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* Always timestamp; prefer an existing software timestamp taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	 * closer to the time of capture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	ts_status = tpacket_get_timestamp(skb, &ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 					  po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (!ts_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		ktime_get_real_ts64(&ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	status |= ts_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		h.h1->tp_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		h.h1->tp_snaplen = snaplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		h.h1->tp_mac = macoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		h.h1->tp_net = netoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		h.h1->tp_sec = ts.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		hdrlen = sizeof(*h.h1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		h.h2->tp_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		h.h2->tp_snaplen = snaplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		h.h2->tp_mac = macoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		h.h2->tp_net = netoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		h.h2->tp_sec = ts.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		h.h2->tp_nsec = ts.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			h.h2->tp_vlan_tci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			h.h2->tp_vlan_tpid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		hdrlen = sizeof(*h.h2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		/* tp_next_offset and the vlan fields are already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		 * populated above, so don't clear them here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		h.h3->tp_status |= status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		h.h3->tp_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		h.h3->tp_snaplen = snaplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		h.h3->tp_mac = macoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		h.h3->tp_net = netoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		h.h3->tp_sec  = ts.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		h.h3->tp_nsec = ts.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		hdrlen = sizeof(*h.h3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	}
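	/*
	 * Userspace owns the slot once tp_status gains TP_STATUS_USER
	 * (set below via __packet_set_status()). An illustrative
	 * TPACKET_V2 reader sketch ("frame" is a slot of the mmap()ed
	 * RX ring; process() is a hypothetical consumer):
	 *
	 *	struct tpacket2_hdr *hdr = frame;
	 *	if (hdr->tp_status & TP_STATUS_USER) {
	 *		unsigned char *pkt =
	 *			(unsigned char *)frame + hdr->tp_mac;
	 *		process(pkt, hdr->tp_snaplen,
	 *			hdr->tp_sec, hdr->tp_nsec);
	 *		hdr->tp_status = TP_STATUS_KERNEL;
	 *	}
	 *
	 * tp_len still holds the original wire length when the capture
	 * was truncated to tp_snaplen.
	 */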
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	sll = h.raw + TPACKET_ALIGN(hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	sll->sll_family = AF_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	sll->sll_hatype = dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	sll->sll_protocol = skb->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	sll->sll_pkttype = skb->pkt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (unlikely(po->origdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		sll->sll_ifindex = orig_dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		sll->sll_ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (po->tp_version <= TPACKET_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		u8 *start, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 					macoff + snaplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		for (start = h.raw; start < end; start += PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			flush_dcache_page(pgv_to_page(start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	if (po->tp_version <= TPACKET_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		spin_lock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		__packet_set_status(po, h.raw, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		spin_unlock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	} else if (po->tp_version == TPACKET_V3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		prb_clear_blk_fill_status(&po->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) drop_n_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	if (skb_head != skb->data && skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		skb->data = skb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		skb->len = skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (!is_drop_n_account)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) drop_n_account:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	spin_unlock(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	atomic_inc(&po->tp_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	is_drop_n_account = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	kfree_skb(copy_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	goto drop_n_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) static void tpacket_destruct_skb(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	struct packet_sock *po = pkt_sk(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	if (likely(po->tx_ring.pg_vec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		void *ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		__u32 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		ph = skb_zcopy_get_nouarg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		packet_dec_pending(&po->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		ts = __packet_set_timestamp(po, ph, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		if (!packet_read_pending(&po->tx_ring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			complete(&po->skb_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	sock_wfree(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
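/*
 * The destructor runs once the device has consumed the skb: the TX slot
 * goes back to TP_STATUS_AVAILABLE (which is 0, so only TP_STATUS_TS_*
 * bits from __packet_set_timestamp() may remain set) and a sleeping
 * tpacket_snd() is woken through skb_completion. A userspace sender
 * typically waits for that transition; a sketch (assumes "hdr" points
 * at the TPACKET_V2 TX slot that was just submitted and "pfd" watches
 * the socket for POLLOUT):
 *
 *	volatile __u32 *statusp = &hdr->tp_status;
 *	while (*statusp & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING))
 *		poll(&pfd, 1, -1);
 */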
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
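/*
 * The clamp above guarantees that a device-computed checksum lies
 * inside the declared header; the "+ 2" accounts for the 16-bit
 * checksum field itself. Worked example (a sketch, plain TCP over IPv4
 * on Ethernet): csum_start = 14 + 20 = 34 (start of the TCP header)
 * and csum_offset = 16 (checksum within it), so hdr_len is raised to
 * at least 34 + 16 + 2 = 52 before the final comparison against len.
 */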
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 				 struct virtio_net_hdr *vnet_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	if (*len < sizeof(*vnet_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	*len -= sizeof(*vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	return __packet_snd_vnet_parse(vnet_hdr, *len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		void *frame, struct net_device *dev, void *data, int tp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 		__be16 proto, unsigned char *addr, int hlen, int copylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		const struct sockcm_cookie *sockc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	union tpacket_uhdr ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	int to_write, offset, len, nr_frags, len_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	struct socket *sock = po->sk.sk_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	ph.raw = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	skb->protocol = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	skb->priority = po->sk.sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	skb->mark = po->sk.sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	skb->tstamp = sockc->transmit_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	skb_setup_tx_timestamp(skb, sockc->tsflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	skb_zcopy_set_nouarg(skb, ph.raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	skb_reserve(skb, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	to_write = tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	if (sock->type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 				NULL, tp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	} else if (copylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		int hdrlen = min_t(int, copylen, tp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		skb_push(skb, dev->hard_header_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		skb_put(skb, copylen - dev->hard_header_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		err = skb_store_bits(skb, 0, data, hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		if (!dev_validate_header(dev, skb->data, hdrlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		data += hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		to_write -= hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	offset = offset_in_page(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	len_max = PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	len = ((to_write > len_max) ? len_max : to_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	skb->data_len = to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	skb->len += to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	skb->truesize += to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	refcount_add(to_write, &po->sk.sk_wmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	while (likely(to_write)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			pr_err("Packet exceeds the number of skb frags (%lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 			       MAX_SKB_FRAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		page = pgv_to_page(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		data += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		to_write -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		len_max = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		len = ((to_write > len_max) ? len_max : to_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	packet_parse_headers(skb, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	return tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) }
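/*
 * The ring pages are attached as skb frags instead of being copied.
 * Worked example (a sketch, assuming PAGE_SIZE == 4096): with
 * to_write = 5000 and data starting at page offset 200, the loop
 * attaches a first frag of 4096 - 200 = 3896 bytes, then a second
 * frag of 5000 - 3896 = 1104 bytes at offset 0 of the next page.
 */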
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) static int tpacket_parse_header(struct packet_sock *po, void *frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 				int size_max, void **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	union tpacket_uhdr ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	int tp_len, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	ph.raw = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		if (ph.h3->tp_next_offset != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 			pr_warn_once("variable-sized slot not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		tp_len = ph.h3->tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		tp_len = ph.h2->tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		tp_len = ph.h1->tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (unlikely(tp_len > size_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	if (unlikely(po->tp_tx_has_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		int off_min, off_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		off_max = po->tx_ring.frame_size - tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 		if (po->sk.sk_type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 			switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 			case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 				off = ph.h3->tp_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 			case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 				off = ph.h2->tp_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 				off = ph.h1->tp_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 			switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 				off = ph.h3->tp_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 			case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 				off = ph.h2->tp_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 				off = ph.h1->tp_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		if (unlikely((off < off_min) || (off_max < off)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	*data = frame + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	return tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
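/*
 * tp_tx_has_off reflects the PACKET_TX_HAS_OFF socket option: when it
 * is set, userspace may place the packet at a custom offset inside the
 * TX frame and announce that offset in tp_mac (SOCK_RAW) or tp_net
 * (SOCK_DGRAM), subject to the [off_min, off_max] check above. A
 * minimal sketch (assumes "fd" is the AF_PACKET socket and "hdr" a
 * TPACKET_V2 TX slot; my_offset and pkt_len are placeholders):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF, &one, sizeof(one));
 *	hdr->tp_mac = my_offset;
 *	hdr->tp_len = pkt_len;
 */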
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	struct virtio_net_hdr *vnet_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	struct sockcm_cookie sockc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	__be16 proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	int err, reserve = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	void *ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	unsigned char *addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	int tp_len, size_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	int len_sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	int status = TP_STATUS_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	int hlen, tlen, copylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	long timeo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	mutex_lock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	 * we must confirm it again under the protection of pg_vec_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	if (unlikely(!po->tx_ring.pg_vec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	if (likely(saddr == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		dev	= packet_cached_dev_get(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		proto	= READ_ONCE(po->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		if (msg->msg_namelen < (saddr->sll_halen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 					+ offsetof(struct sockaddr_ll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 						sll_addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		proto	= saddr->sll_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		if (po->sk.sk_socket->type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 			if (dev && msg->msg_namelen < dev->addr_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 				   offsetof(struct sockaddr_ll, sll_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 				goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			addr = saddr->sll_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	if (unlikely(dev == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	err = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	if (unlikely(!(dev->flags & IFF_UP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	sockcm_init(&sockc, &po->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	if (msg->msg_controllen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		err = sock_cmsg_send(&po->sk, msg, &sockc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	if (po->sk.sk_socket->type == SOCK_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		reserve = dev->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	size_max = po->tx_ring.frame_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		size_max = dev->mtu + reserve + VLAN_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	reinit_completion(&po->skb_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		ph = packet_current_frame(po, &po->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 					  TP_STATUS_SEND_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		if (unlikely(ph == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 			if (need_wait && skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 				if (timeo <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 					goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			/* check for additional frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		tp_len = tpacket_parse_header(po, ph, size_max, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		if (tp_len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			goto tpacket_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		status = TP_STATUS_SEND_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		hlen = LL_RESERVED_SPACE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		tlen = dev->needed_tailroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		if (po->has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			vnet_hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			data += sizeof(*vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			tp_len -= sizeof(*vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 			if (tp_len < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 				tp_len = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 				goto tpacket_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			copylen = __virtio16_to_cpu(vio_le(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 						    vnet_hdr->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		copylen = max_t(int, copylen, dev->hard_header_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		skb = sock_alloc_send_skb(&po->sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 				hlen + tlen + sizeof(struct sockaddr_ll) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 				(copylen - dev->hard_header_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 				!need_wait, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		if (unlikely(skb == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 			/* we assume the socket was initially writable ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 			if (likely(len_sum > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 				err = len_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 			goto out_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 					  addr, hlen, copylen, &sockc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		if (likely(tp_len >= 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		    tp_len > dev->mtu + reserve &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		    !po->has_vnet_hdr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		    !packet_extra_vlan_len_allowed(dev, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 			tp_len = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		if (unlikely(tp_len < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) tpacket_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 			if (po->tp_loss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 				__packet_set_status(po, ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 						TP_STATUS_AVAILABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 				packet_increment_head(&po->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 				kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 				status = TP_STATUS_WRONG_FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 				err = tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 				goto out_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		if (po->has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 				tp_len = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 				goto tpacket_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			virtio_net_hdr_set_proto(skb, vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		skb->destructor = tpacket_destruct_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		__packet_set_status(po, ph, TP_STATUS_SENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		packet_inc_pending(&po->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		status = TP_STATUS_SEND_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		err = po->xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		if (unlikely(err > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			err = net_xmit_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 			if (err && __packet_get_status(po, ph) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 				   TP_STATUS_AVAILABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 				/* skb was destructed already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 				skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 				goto out_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			 * The skb was dropped but its destructor has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 			 * run yet; treat this like congestion or err < 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		packet_increment_head(&po->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		len_sum += tp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	} while (likely((ph != NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		/* Note: packet_read_pending() might be slow if we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 		 * to call it, as it sums a per-cpu counter, but in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		 * fast path the first condition already short-circuits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		 * the loop, so we don't have to take that path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		 * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		 (need_wait && packet_read_pending(&po->tx_ring))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	err = len_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) out_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	__packet_set_status(po, ph, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	mutex_unlock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
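/*
 * The userspace counterpart of the loop above: write a packet into a
 * TX slot, mark it TP_STATUS_SEND_REQUEST and kick the socket with a
 * zero-length send. A TPACKET_V2 sketch (assumes the ring was created
 * with PACKET_TX_RING and mmap()ed; build_frame() is a hypothetical
 * helper returning the packet length it wrote):
 *
 *	struct tpacket2_hdr *hdr = slot;
 *	void *data = (char *)slot + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *	hdr->tp_len = build_frame(data);
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	sendto(fd, NULL, 0, 0, NULL, 0);
 *
 * With MSG_DONTWAIT the kernel queues what it can and returns; without
 * it, tpacket_snd() waits on skb_completion as seen above.
 */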
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 				        size_t reserve, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 				        size_t linear, int noblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 				        int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	/* Under a page?  Don't bother with paged skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	if (prepad + len < PAGE_SIZE || !linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		linear = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 				   err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	skb_reserve(skb, reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	skb_put(skb, linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	skb->data_len = len - linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	skb->len += len - linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
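/*
 * Worked example (a sketch, assuming PAGE_SIZE == 4096 and a small
 * prepad): a 512-byte send fits under a page, so linear is forced to
 * len and the skb is fully linear; a 9000-byte send with linear = 66
 * requested keeps 66 linear bytes and leaves 8934 bytes as paged data
 * (skb->data_len).
 */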
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	__be16 proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	unsigned char *addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	int err, reserve = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	struct sockcm_cookie sockc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	struct virtio_net_hdr vnet_hdr = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	int offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	bool has_vnet_hdr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	int hlen, tlen, linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	int extra_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	 *	Get and verify the address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	if (likely(saddr == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		dev	= packet_cached_dev_get(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		proto	= READ_ONCE(po->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		proto	= saddr->sll_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		if (sock->type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 			if (dev && msg->msg_namelen < dev->addr_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 				   offsetof(struct sockaddr_ll, sll_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 			addr = saddr->sll_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	if (unlikely(dev == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	err = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	if (unlikely(!(dev->flags & IFF_UP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	sockcm_init(&sockc, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	sockc.mark = sk->sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	if (msg->msg_controllen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		err = sock_cmsg_send(sk, msg, &sockc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	if (sock->type == SOCK_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		reserve = dev->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	if (po->has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		has_vnet_hdr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		if (!netif_supports_nofcs(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 			err = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 		extra_len = 4; /* We're doing our own CRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	if (!vnet_hdr.gso_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	hlen = LL_RESERVED_SPACE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	tlen = dev->needed_tailroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	linear = max(linear, min_t(int, len, dev->hard_header_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 			       msg->msg_flags & MSG_DONTWAIT, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	if (sock->type == SOCK_DGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		if (unlikely(offset < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	} else if (reserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 		skb_reserve(skb, -reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		if (len < reserve + sizeof(struct ipv6hdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		    dev->min_header_len != dev->hard_header_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 			skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	/* Returns -EFAULT on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	if (sock->type == SOCK_RAW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	    !dev_validate_header(dev, skb->data, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	skb_setup_tx_timestamp(skb, sockc.tsflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	    !packet_extra_vlan_len_allowed(dev, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	skb->protocol = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	skb->priority = sk->sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	skb->mark = sockc.mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	skb->tstamp = sockc.transmit_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	if (has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 		len += sizeof(vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	packet_parse_headers(skb, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	if (unlikely(extra_len == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		skb->no_fcs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	err = po->xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
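/*
 * Without a TX ring this plain sendmsg() path is used. Minimal
 * userspace sketch (assumes a SOCK_DGRAM packet socket, so the kernel
 * builds the link-layer header itself; "eth0" and dst_mac are
 * placeholders):
 *
 *	struct sockaddr_ll sll = {0};
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	sll.sll_halen    = ETH_ALEN;
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */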
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	 * tpacket_snd() will redo the check safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	if (data_race(po->tx_ring.pg_vec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		return tpacket_snd(po, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	return packet_snd(sock, msg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)  *	Close a PACKET socket. This is fairly simple. We immediately go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)  *	to 'closed' state and remove our protocol entry from the device list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) static int packet_release(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	struct packet_sock *po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	struct packet_fanout *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	union tpacket_req_u req_u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	mutex_lock(&net->packet.sklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	sk_del_node_init_rcu(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	mutex_unlock(&net->packet.sklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	sock_prot_inuse_add(net, sk->sk_prot, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	unregister_prot_hook(sk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	packet_cached_dev_reset(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	if (po->prot_hook.dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		dev_put(po->prot_hook.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		po->prot_hook.dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	packet_flush_mclist(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	if (po->rx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		memset(&req_u, 0, sizeof(req_u));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 		packet_set_ring(sk, &req_u, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	if (po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		memset(&req_u, 0, sizeof(req_u));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 		packet_set_ring(sk, &req_u, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	f = fanout_release(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	kfree(po->rollover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	if (f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		fanout_release_data(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 		kvfree(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	 *	Now the socket is dead. No more input will appear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	sock_orphan(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	sock->sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	/* Purge queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	packet_free_pending(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	sk_refcnt_debug_release(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)  *	Attach a packet hook.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)  */
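/*
 * Userspace reaches packet_do_bind() through bind(). A minimal sketch
 * (assumes "eth0" as the interface; ETH_P_ALL receives every
 * protocol):
 *
 *	struct sockaddr_ll sll = {0};
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */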
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 			  __be16 proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	struct net_device *dev_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	__be16 proto_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	bool need_rehook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	struct net_device *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	bool unlisted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	if (po->fanout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	if (name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		dev = dev_get_by_name_rcu(sock_net(sk), name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	} else if (ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		dev_hold(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	proto_curr = po->prot_hook.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	dev_curr = po->prot_hook.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	need_rehook = proto_curr != proto || dev_curr != dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	if (need_rehook) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		if (po->running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 			/* prevents packet_notifier() from calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 			 * register_prot_hook()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 			WRITE_ONCE(po->num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 			__unregister_prot_hook(sk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 			dev_curr = po->prot_hook.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 			if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 								 dev->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		BUG_ON(po->running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		WRITE_ONCE(po->num, proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		po->prot_hook.type = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		if (unlikely(unlisted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 			dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 			po->prot_hook.dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			WRITE_ONCE(po->ifindex, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 			packet_cached_dev_reset(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 			po->prot_hook.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 			packet_cached_dev_assign(po, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	if (dev_curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		dev_put(dev_curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	if (proto == 0 || !need_rehook)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		register_prot_hook(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		sk->sk_err = ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		if (!sock_flag(sk, SOCK_DEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 			sk->sk_error_report(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)  *	Bind a packet socket to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 			    int addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	char name[sizeof(uaddr->sa_data) + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 *	Check legality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	if (addr_len != sizeof(struct sockaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	/* uaddr->sa_data comes from userspace and is not guaranteed to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	 * NUL-terminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	name[sizeof(uaddr->sa_data)] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
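/*
 *	A minimal userspace sketch of the legacy SOCK_PACKET bind, assuming
 *	an interface named "eth0" exists: the address is a plain struct
 *	sockaddr whose sa_data carries the device name.
 *
 *		struct sockaddr sa = { .sa_family = AF_PACKET };
 *		strncpy(sa.sa_data, "eth0", sizeof(sa.sa_data));
 *		bind(fd, &sa, sizeof(sa));
 *
 *	This is why sa_data is copied into a NUL-terminated local buffer
 *	above before it is resolved with dev_get_by_name_rcu().
 */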
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	 *	Check legality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	if (addr_len < sizeof(struct sockaddr_ll))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	if (sll->sll_family != AF_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 			      sll->sll_protocol ? : pkt_sk(sk)->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
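/*
 *	A minimal userspace sketch for the modern bind, assuming
 *	<linux/if_packet.h>, <net/if.h> and an interface named "eth0":
 *
 *		struct sockaddr_ll sll = {
 *			.sll_family   = AF_PACKET,
 *			.sll_protocol = htons(ETH_P_ALL),
 *			.sll_ifindex  = if_nametoindex("eth0"),
 *		};
 *		bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 *	A zero sll_protocol keeps the socket's current protocol, matching
 *	the "sll->sll_protocol ? : pkt_sk(sk)->num" fallback above.
 */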
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) static struct proto packet_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	.name	  = "PACKET",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	.owner	  = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	.obj_size = sizeof(struct packet_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)  *	Create a packet of type SOCK_PACKET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) static int packet_create(struct net *net, struct socket *sock, int protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 			 int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	struct packet_sock *po;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	    sock->type != SOCK_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		return -ESOCKTNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	sock->state = SS_UNCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	if (sk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	sock->ops = &packet_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	if (sock->type == SOCK_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		sock->ops = &packet_ops_spkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	sock_init_data(sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	init_completion(&po->skb_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	sk->sk_family = PF_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	po->num = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	po->xmit = dev_queue_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	err = packet_alloc_pending(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	packet_cached_dev_reset(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	sk->sk_destruct = packet_sock_destruct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 	sk_refcnt_debug_inc(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	 *	Attach a protocol block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	spin_lock_init(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	mutex_init(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	po->rollover = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	po->prot_hook.func = packet_rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	if (sock->type == SOCK_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		po->prot_hook.func = packet_rcv_spkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	po->prot_hook.af_packet_priv = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	po->prot_hook.af_packet_net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	if (proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		po->prot_hook.type = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 		__register_prot_hook(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	mutex_lock(&net->packet.sklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	sk_add_node_tail_rcu(sk, &net->packet.sklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	mutex_unlock(&net->packet.sklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	sock_prot_inuse_add(net, &packet_proto, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	sk_free(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) }
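/*
 *	packet_create() is reached through the socket() syscall, so creating
 *	a packet socket needs CAP_NET_RAW in the owning user namespace; a
 *	minimal userspace sketch:
 *
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		if (fd < 0)
 *			perror("socket");	// EPERM without CAP_NET_RAW
 *
 *	Passing protocol 0 leaves the socket unbound: no protocol hook is
 *	registered until bind() supplies one.
 */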
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)  *	Pull a packet from our receive queue and hand it to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)  *	If necessary we block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 			  int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	int copied, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	int vnet_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	unsigned int origlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	/* What error should we return now? EUNATCH? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	if (pkt_sk(sk)->ifindex < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	if (flags & MSG_ERRQUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		err = sock_recv_errqueue(sk, msg, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	 *	Call the generic datagram receiver. This handles all sorts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	 *	of horrible races and re-entrancy so we can forget about it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	 *	in the protocol layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	 *	Now it will return ENETDOWN, if the device has just gone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	 *	down, but then it will block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	 *	An error occurred, so return it. Because skb_recv_datagram()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	 *	handles the blocking for us, we don't need to see or worry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	 *	about blocking retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	packet_rcv_try_clear_pressure(pkt_sk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 	if (pkt_sk(sk)->has_vnet_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		err = packet_rcv_vnet(msg, skb, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		vnet_hdr_len = sizeof(struct virtio_net_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	/* You lose any data beyond the buffer you gave. If this worries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	 * a user program, it can ask the device for its MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	 * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	copied = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	if (copied > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		copied = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	if (sock->type != SOCK_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		/* Original length was stored in sockaddr_ll fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 		sll->sll_family = AF_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 		sll->sll_protocol = skb->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	sock_recv_ts_and_drops(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	if (msg->msg_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		const size_t max_len = min(sizeof(skb->cb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 					   sizeof(struct sockaddr_storage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		int copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		/* If the address length field is there to be filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		 * in, we fill it in now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		if (sock->type == SOCK_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 			copy_len = msg->msg_namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 			msg->msg_namelen = sll->sll_halen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 				offsetof(struct sockaddr_ll, sll_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 			copy_len = msg->msg_namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 				memset(msg->msg_name +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 				       offsetof(struct sockaddr_ll, sll_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 				       0, sizeof(sll->sll_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 				msg->msg_namelen = sizeof(struct sockaddr_ll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		if (WARN_ON_ONCE(copy_len > max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 			copy_len = max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 			msg->msg_namelen = copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	if (pkt_sk(sk)->auxdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		struct tpacket_auxdata aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		aux.tp_status = TP_STATUS_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		else if (skb->pkt_type != PACKET_OUTGOING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 			  skb_csum_unnecessary(skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			aux.tp_status |= TP_STATUS_CSUM_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 		aux.tp_len = origlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		aux.tp_snaplen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		aux.tp_mac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 		aux.tp_net = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 		if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 			aux.tp_vlan_tci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 			aux.tp_vlan_tpid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	 *	Free or return the buffer as appropriate. Again this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	 *	hides all the races and re-entrancy issues from us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	err = vnet_hdr_len + ((flags & MSG_TRUNC) ? skb->len : copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	skb_free_datagram(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
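/*
 *	A userspace sketch of the semantics above: with MSG_TRUNC the return
 *	value is the full packet length even when the buffer was smaller,
 *	and with PACKET_AUXDATA enabled the metadata arrives as a control
 *	message.
 *
 *		char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *				      .msg_control = cbuf,
 *				      .msg_controllen = sizeof(cbuf) };
 *		ssize_t n = recvmsg(fd, &msg, MSG_TRUNC);
 *		struct cmsghdr *c;
 *
 *		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *			if (c->cmsg_level == SOL_PACKET &&
 *			    c->cmsg_type == PACKET_AUXDATA)
 *				;	// CMSG_DATA(c) is a struct tpacket_auxdata
 */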
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 			       int peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	struct sock *sk	= sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	if (peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	uaddr->sa_family = AF_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	return sizeof(*uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 			  int peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	int ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	if (peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	ifindex = READ_ONCE(po->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	sll->sll_family = AF_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	sll->sll_ifindex = ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	sll->sll_protocol = READ_ONCE(po->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	sll->sll_pkttype = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		sll->sll_hatype = dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		sll->sll_halen = dev->addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 		sll->sll_halen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
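/*
 *	A userspace sketch: getsockname() on a bound packet socket fills a
 *	struct sockaddr_ll, including the hardware address of the device.
 *
 *		struct sockaddr_ll sll;
 *		socklen_t len = sizeof(sll);
 *		getsockname(fd, (struct sockaddr *)&sll, &len);
 *		// sll.sll_ifindex, sll.sll_halen and sll.sll_addr are valid
 */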
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 			 int what)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	switch (i->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	case PACKET_MR_MULTICAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		if (i->alen != dev->addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 		if (what > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 			return dev_mc_add(dev, i->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 			return dev_mc_del(dev, i->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	case PACKET_MR_PROMISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		return dev_set_promiscuity(dev, what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	case PACKET_MR_ALLMULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		return dev_set_allmulti(dev, what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	case PACKET_MR_UNICAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		if (i->alen != dev->addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		if (what > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 			return dev_uc_add(dev, i->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 			return dev_uc_del(dev, i->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) static void packet_dev_mclist_delete(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 				     struct packet_mclist **mlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	struct packet_mclist *ml;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	while ((ml = *mlp) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		if (ml->ifindex == dev->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 			packet_dev_mc(dev, ml, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 			*mlp = ml->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 			kfree(ml);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 			mlp = &ml->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	struct packet_mclist *ml, *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	if (mreq->mr_alen > dev->addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	i = kmalloc(sizeof(*i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	if (i == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	for (ml = po->mclist; ml; ml = ml->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		if (ml->ifindex == mreq->mr_ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 		    ml->type == mreq->mr_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 		    ml->alen == mreq->mr_alen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			ml->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 			/* Free the new element ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 			kfree(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	i->type = mreq->mr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	i->ifindex = mreq->mr_ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	i->alen = mreq->mr_alen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	memcpy(i->addr, mreq->mr_address, i->alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	i->count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	i->next = po->mclist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	po->mclist = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	err = packet_dev_mc(dev, i, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		po->mclist = i->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		kfree(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) }
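/*
 *	packet_mc_add() backs setsockopt(PACKET_ADD_MEMBERSHIP); besides
 *	real multicast addresses it can switch the device into promiscuous
 *	or all-multicast mode. A userspace sketch, assuming "eth0":
 *
 *		struct packet_mreq mreq = {
 *			.mr_ifindex = if_nametoindex("eth0"),
 *			.mr_type    = PACKET_MR_PROMISC,
 *		};
 *		setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *			   &mreq, sizeof(mreq));
 *
 *	The reference counting above lets the same request be added several
 *	times and dropped symmetrically with PACKET_DROP_MEMBERSHIP.
 */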
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	struct packet_mclist *ml, **mlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		if (ml->ifindex == mreq->mr_ifindex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 		    ml->type == mreq->mr_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 		    ml->alen == mreq->mr_alen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 			if (--ml->count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 				struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 				*mlp = ml->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 				if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 					packet_dev_mc(dev, ml, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 				kfree(ml);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) static void packet_flush_mclist(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	struct packet_mclist *ml;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	if (!po->mclist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	while ((ml = po->mclist) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 		struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 		po->mclist = ml->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 		if (dev != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 			packet_dev_mc(dev, ml, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		kfree(ml);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		  unsigned int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	if (level != SOL_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	case PACKET_ADD_MEMBERSHIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	case PACKET_DROP_MEMBERSHIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 		struct packet_mreq_max mreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		int len = optlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 		memset(&mreq, 0, sizeof(mreq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		if (len < sizeof(struct packet_mreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 		if (len > sizeof(mreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 			len = sizeof(mreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 		if (copy_from_sockptr(&mreq, optval, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 		if (optname == PACKET_ADD_MEMBERSHIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 			ret = packet_mc_add(sk, &mreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 			ret = packet_mc_drop(sk, &mreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	case PACKET_RX_RING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	case PACKET_TX_RING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		union tpacket_req_u req_u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 		switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 		case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 			len = sizeof(req_u.req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 			len = sizeof(req_u.req3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 		if (optlen < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 			if (copy_from_sockptr(&req_u.req, optval, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 				ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 				ret = packet_set_ring(sk, &req_u, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 						    optname == PACKET_TX_RING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	case PACKET_COPY_THRESH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 		pkt_sk(sk)->copy_thresh = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	case PACKET_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 		case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 			po->tp_version = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	}
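	/* A userspace sketch of the ordering the -EBUSY above enforces:
	 * the version must be chosen before the ring is created, e.g. for
	 * a TPACKET_V3 receive ring (sizes are illustrative):
	 *
	 *	int v = TPACKET_V3;
	 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
	 *
	 *	struct tpacket_req3 req = {
	 *		.tp_block_size	   = 1 << 22,
	 *		.tp_block_nr	   = 64,
	 *		.tp_frame_size	   = 1 << 11,
	 *		.tp_frame_nr	   = ((1 << 22) / (1 << 11)) * 64,
	 *		.tp_retire_blk_tov = 60,	// ms
	 *	};
	 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
	 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	 */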
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	case PACKET_RESERVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 		if (val > INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 			po->tp_reserve = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	case PACKET_LOSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 		unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 			po->tp_loss = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	case PACKET_AUXDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		if (optlen < sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 		po->auxdata = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	case PACKET_ORIGDEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 		if (optlen < sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 		po->origdev = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	case PACKET_VNET_HDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		if (sock->type != SOCK_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		if (optlen < sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 			po->has_vnet_hdr = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	}
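	/* A userspace sketch: with the option enabled every packet on a
	 * SOCK_RAW socket is prefixed by a struct virtio_net_hdr
	 * (<linux/virtio_net.h>) describing offloads:
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR,
	 *		   &one, sizeof(one));
	 *
	 * which is what packet_recvmsg() accounts for above via
	 * vnet_hdr_len.
	 */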
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	case PACKET_TIMESTAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 		po->tp_tstamp = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	}
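	/* A userspace sketch: tp_tstamp takes SOF_TIMESTAMPING_* flags from
	 * <linux/net_tstamp.h>, e.g. to prefer raw hardware timestamps in
	 * the ring and auxdata where the NIC provides them:
	 *
	 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
	 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
	 *		   &req, sizeof(req));
	 */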
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	case PACKET_FANOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 		struct fanout_args args = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		if (optlen != sizeof(int) && optlen != sizeof(args))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 		if (copy_from_sockptr(&args, optval, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 		return fanout_add(sk, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	}
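	/* A userspace sketch of the classic int form, which packs a 16-bit
	 * group id of the caller's choosing together with the fanout mode:
	 *
	 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
	 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
	 *		   &arg, sizeof(arg));
	 *
	 * All sockets that join the same group id share the load-balanced
	 * traffic according to the chosen mode.
	 */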
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	case PACKET_FANOUT_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 		/* Paired with the WRITE_ONCE() in fanout_add() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 		if (!READ_ONCE(po->fanout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 		return fanout_set_data(po, optval, optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	case PACKET_IGNORE_OUTGOING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		if (val < 0 || val > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 		po->prot_hook.ignore_outgoing = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	}
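	/* A userspace sketch: sniffers that only care about received
	 * traffic can skip PACKET_OUTGOING clones entirely:
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_PACKET, PACKET_IGNORE_OUTGOING,
	 *		   &one, sizeof(one));
	 */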
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	case PACKET_TX_HAS_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 		lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 			po->tp_tx_has_off = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 	case PACKET_QDISC_BYPASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 		if (optlen != sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 		if (copy_from_sockptr(&val, optval, sizeof(val)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	}
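	/* A userspace sketch: enabling the bypass swaps po->xmit to
	 * packet_direct_xmit(), so transmitted frames skip the qdisc layer
	 * entirely:
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
	 *		   &one, sizeof(one));
	 */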
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) static int packet_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 			     char __user *optval, int __user *optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	int val, lv = sizeof(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	void *data = &val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	union tpacket_stats_u st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	struct tpacket_rollover_stats rstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	int drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	if (level != SOL_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	if (get_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 	case PACKET_STATISTICS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		spin_lock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		memcpy(&st, &po->stats, sizeof(st));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		memset(&po->stats, 0, sizeof(po->stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 		spin_unlock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 		drops = atomic_xchg(&po->tp_drops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 		if (po->tp_version == TPACKET_V3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 			lv = sizeof(struct tpacket_stats_v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 			st.stats3.tp_drops = drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 			st.stats3.tp_packets += drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 			data = &st.stats3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 			lv = sizeof(struct tpacket_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 			st.stats1.tp_drops = drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 			st.stats1.tp_packets += drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 			data = &st.stats1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	case PACKET_AUXDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 		val = po->auxdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	case PACKET_ORIGDEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 		val = po->origdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	case PACKET_VNET_HDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		val = po->has_vnet_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	case PACKET_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 		val = po->tp_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	case PACKET_HDRLEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 		if (len > sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 			len = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 		if (len < sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		if (copy_from_user(&val, optval, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 		switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 		case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 			val = sizeof(struct tpacket_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 		case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 			val = sizeof(struct tpacket2_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 		case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 			val = sizeof(struct tpacket3_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	case PACKET_RESERVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 		val = po->tp_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	case PACKET_LOSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 		val = po->tp_loss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	case PACKET_TIMESTAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 		val = po->tp_tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	case PACKET_FANOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 		val = (po->fanout ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 		       ((u32)po->fanout->id |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 			((u32)po->fanout->type << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 			((u32)po->fanout->flags << 24)) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 		       0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	case PACKET_IGNORE_OUTGOING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 		val = po->prot_hook.ignore_outgoing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	case PACKET_ROLLOVER_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 		if (!po->rollover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		rstats.tp_all = atomic_long_read(&po->rollover->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 		data = &rstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 		lv = sizeof(rstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	case PACKET_TX_HAS_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 		val = po->tp_tx_has_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 	case PACKET_QDISC_BYPASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 		val = packet_use_direct_xmit(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	if (len > lv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 		len = lv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	if (put_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 	if (copy_to_user(optval, data, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) }
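
/*
 * Usage sketch (userspace, illustrative only): PACKET_STATISTICS has
 * read-and-reset semantics -- the kernel zeroes its copy under the
 * receive queue lock above, so each call returns the deltas since the
 * previous call.  TPACKET_V3 sockets read a struct tpacket_stats_v3
 * instead.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
 */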
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 
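/*
 * React to netdevice events.  NETDEV_UNREGISTER drops any multicast
 * bindings on the dying device and falls through to the NETDEV_DOWN
 * handling, which unregisters the prot_hook and reports ENETDOWN on
 * bound sockets; only a true unregister additionally releases the
 * cached device reference and resets ifindex to -1.  NETDEV_UP
 * re-registers the hook for sockets still bound to the device.
 */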
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) static int packet_notifier(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 			   unsigned long msg, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 	struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	sk_for_each_rcu(sk, &net->packet.sklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 		switch (msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 		case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 			if (po->mclist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 				packet_dev_mclist_delete(dev, &po->mclist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 			if (dev->ifindex == po->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 				spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 				if (po->running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 					__unregister_prot_hook(sk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 					sk->sk_err = ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 					if (!sock_flag(sk, SOCK_DEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 						sk->sk_error_report(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 				if (msg == NETDEV_UNREGISTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 					packet_cached_dev_reset(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 					WRITE_ONCE(po->ifindex, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 					if (po->prot_hook.dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 						dev_put(po->prot_hook.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 					po->prot_hook.dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 				spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		case NETDEV_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 			if (dev->ifindex == po->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 				spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 				if (po->num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 					register_prot_hook(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 				spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) static int packet_ioctl(struct socket *sock, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 			unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	case SIOCOUTQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 		int amount = sk_wmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 		return put_user(amount, (int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	case SIOCINQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		int amount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 		spin_lock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 		skb = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 			amount = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 		spin_unlock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 		return put_user(amount, (int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) #ifdef CONFIG_INET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	case SIOCADDRT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	case SIOCDELRT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	case SIOCDARP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	case SIOCGARP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 	case SIOCSARP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	case SIOCGIFADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	case SIOCSIFADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	case SIOCGIFBRDADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	case SIOCSIFBRDADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	case SIOCGIFNETMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 	case SIOCSIFNETMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	case SIOCGIFDSTADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	case SIOCSIFDSTADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	case SIOCSIFFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 		return inet_dgram_ops.ioctl(sock, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 	}
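	/* not reached: every case above returns */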
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 
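/*
 * Poll for ring state on top of the generic datagram events: the rx
 * ring is readable once the frame behind the head is no longer
 * TP_STATUS_KERNEL (i.e. it carries data for userspace), and the tx
 * ring is writable while the current frame is TP_STATUS_AVAILABLE.
 */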
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) static __poll_t packet_poll(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 				poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 	__poll_t mask = datagram_poll(file, sock, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 	spin_lock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	if (po->rx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 		if (!packet_previous_rx_frame(po, &po->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 			TP_STATUS_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 			mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	packet_rcv_try_clear_pressure(po);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 	spin_unlock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	spin_lock_bh(&sk->sk_write_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	if (po->tx_ring.pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 			mask |= EPOLLOUT | EPOLLWRNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	spin_unlock_bh(&sk->sk_write_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) /* Dirty? Well, I still have not found a better way to account
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224)  * for user mmaps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) static void packet_mm_open(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	struct socket *sock = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	if (sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 		atomic_inc(&pkt_sk(sk)->mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) static void packet_mm_close(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	struct socket *sock = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	if (sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 		atomic_dec(&pkt_sk(sk)->mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) static const struct vm_operations_struct packet_mmap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	.open	=	packet_mm_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 	.close	=	packet_mm_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 			unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		if (likely(pg_vec[i].buffer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 			if (is_vmalloc_addr(pg_vec[i].buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 				vfree(pg_vec[i].buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 				free_pages((unsigned long)pg_vec[i].buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 					   order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 			pg_vec[i].buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	kfree(pg_vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 
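/*
 * Allocate one ring block.  Strategy: try physically contiguous pages
 * cheaply first (__GFP_NORETRY | __GFP_NOWARN), fall back to vmalloc,
 * and as a last resort retry the page allocator with __GFP_NORETRY
 * cleared so reclaim (and swap) may be used.
 */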
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) static char *alloc_one_pg_vec_page(unsigned long order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	buffer = (char *) __get_free_pages(gfp_flags, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 	if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 		return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 	/* __get_free_pages failed, fall back to vmalloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 	if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 		return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	/* vmalloc failed, let's dig into swap here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	gfp_flags &= ~__GFP_NORETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	buffer = (char *) __get_free_pages(gfp_flags, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 	if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 		return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 	/* complete and utter failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	unsigned int block_nr = req->tp_block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	struct pgv *pg_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	if (unlikely(!pg_vec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	for (i = 0; i < block_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 		if (unlikely(!pg_vec[i].buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 			goto out_free_pgvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	return pg_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) out_free_pgvec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	free_pg_vec(pg_vec, order, block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 	pg_vec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 
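/*
 * Install a new rx/tx ring, or tear the existing one down when
 * tp_block_nr is zero.  The geometry checks below accept, for example,
 * on a 4 KiB-page system (values illustrative):
 *
 *	tp_block_size = 4096	(page-aligned, > 0)
 *	tp_frame_size = 2048	(>= tp_hdrlen + tp_reserve, 16-byte aligned)
 *	tp_block_nr   = 64	->  frames_per_block = 2
 *	tp_frame_nr   = 128	(must equal 2 * 64 exactly)
 *
 * The prot_hook is unregistered around the swap so no packets are
 * delivered while the old and new block vectors are exchanged under
 * pg_vec_lock.
 */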
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 		int closing, int tx_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	struct pgv *pg_vec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	unsigned long *rx_owner_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	int was_running, order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	struct packet_ring_buffer *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	struct sk_buff_head *rb_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	__be16 num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 	/* Alias added to minimize code churn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 	struct tpacket_req *req = &req_u->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 	if (!closing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 		if (atomic_read(&po->mapped))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 		if (packet_read_pending(rb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	if (req->tp_block_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 		unsigned int min_frame_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 		/* Sanity tests and some calculations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 		err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 		if (unlikely(rb->pg_vec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 		case TPACKET_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 			po->tp_hdrlen = TPACKET_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 		case TPACKET_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 			po->tp_hdrlen = TPACKET2_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 			po->tp_hdrlen = TPACKET3_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 		if (unlikely((int)req->tp_block_size <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 		min_frame_size = po->tp_hdrlen + po->tp_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 		if (po->tp_version >= TPACKET_V3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 		    req->tp_block_size <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 		if (unlikely(req->tp_frame_size < min_frame_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 		if (unlikely(rb->frames_per_block == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 					req->tp_frame_nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 		order = get_order(req->tp_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 		pg_vec = alloc_pg_vec(req, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 		if (unlikely(!pg_vec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 		switch (po->tp_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 		case TPACKET_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 			/* Block transmit is not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 			if (!tx_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 				init_prb_bdqc(po, rb, pg_vec, req_u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 				struct tpacket_req3 *req3 = &req_u->req3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 				if (req3->tp_retire_blk_tov ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 				    req3->tp_sizeof_priv ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 				    req3->tp_feature_req_word) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 					err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 					goto out_free_pg_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 			if (!tx_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 				if (!rx_owner_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 					goto out_free_pg_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	/* tp_block_nr == 0: the caller is tearing the ring down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		if (unlikely(req->tp_frame_nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 	/* Detach socket from network */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 	spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 	was_running = po->running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 	num = po->num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 	if (was_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 		WRITE_ONCE(po->num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 		__unregister_prot_hook(sk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	mutex_lock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	if (closing || atomic_read(&po->mapped) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 		spin_lock_bh(&rb_queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 		swap(rb->pg_vec, pg_vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 		if (po->tp_version <= TPACKET_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 			swap(rb->rx_owner_map, rx_owner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 		rb->frame_max = (req->tp_frame_nr - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 		rb->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 		rb->frame_size = req->tp_frame_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		spin_unlock_bh(&rb_queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		swap(rb->pg_vec_order, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 		swap(rb->pg_vec_len, req->tp_block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 						tpacket_rcv : packet_rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 		skb_queue_purge(rb_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 		if (atomic_read(&po->mapped))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 			pr_err("packet_mmap: vma is busy: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 			       atomic_read(&po->mapped));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 	mutex_unlock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 	spin_lock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 	if (was_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		WRITE_ONCE(po->num, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 		register_prot_hook(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	spin_unlock(&po->bind_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 		/* Block-based V3 is unsupported on the tx ring, so only rx has a retire-blk timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		if (!tx_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 			prb_shutdown_retire_blk_timer(po, rb_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) out_free_pg_vec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 	if (pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 		bitmap_free(rx_owner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		free_pg_vec(pg_vec, order, req->tp_block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 
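/*
 * Map the ring(s) into one contiguous VMA: rx pages first, then tx.
 * The caller must map the exact combined size at page offset 0; each
 * page is inserted with vm_insert_page(), and the 'mapped' count taken
 * here prevents packet_set_ring() from resizing a mapped ring.
 */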
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) static int packet_mmap(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 		struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 	struct packet_sock *po = pkt_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	unsigned long size, expected_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	struct packet_ring_buffer *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	if (vma->vm_pgoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 	mutex_lock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 	expected_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 		if (rb->pg_vec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 			expected_size += rb->pg_vec_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 						* rb->pg_vec_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 						* PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	if (expected_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	if (size != expected_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	start = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 		if (rb->pg_vec == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 		for (i = 0; i < rb->pg_vec_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 			struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 			void *kaddr = rb->pg_vec[i].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 			int pg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 				page = pgv_to_page(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 				err = vm_insert_page(vma, start, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 				if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 				start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 				kaddr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	atomic_inc(&po->mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 	vma->vm_ops = &packet_mmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 	mutex_unlock(&po->pg_vec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) }
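
/*
 * Usage sketch (userspace, illustrative only): setting up a TPACKET_V2
 * rx ring with the geometry from the example above and mapping it.
 * 'fd' is assumed to be a bound AF_PACKET socket; error handling is
 * elided.
 *
 *	int v = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_nr * req.tp_block_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */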
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) static const struct proto_ops packet_ops_spkt = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 	.family =	PF_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	.owner =	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 	.release =	packet_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	.bind =		packet_bind_spkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	.connect =	sock_no_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 	.socketpair =	sock_no_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 	.accept =	sock_no_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 	.getname =	packet_getname_spkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 	.poll =		datagram_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 	.ioctl =	packet_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 	.gettstamp =	sock_gettstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 	.listen =	sock_no_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	.shutdown =	sock_no_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 	.sendmsg =	packet_sendmsg_spkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	.recvmsg =	packet_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	.mmap =		sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 	.sendpage =	sock_no_sendpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) static const struct proto_ops packet_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 	.family =	PF_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 	.owner =	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	.release =	packet_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	.bind =		packet_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 	.connect =	sock_no_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	.socketpair =	sock_no_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 	.accept =	sock_no_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	.getname =	packet_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	.poll =		packet_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	.ioctl =	packet_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	.gettstamp =	sock_gettstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	.listen =	sock_no_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 	.shutdown =	sock_no_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	.setsockopt =	packet_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	.getsockopt =	packet_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 	.sendmsg =	packet_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	.recvmsg =	packet_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 	.mmap =		packet_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	.sendpage =	sock_no_sendpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) static const struct net_proto_family packet_family_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	.family =	PF_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 	.create =	packet_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 	.owner	=	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) static struct notifier_block packet_netdev_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	.notifier_call =	packet_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 	__acquires(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 	struct net *net = seq_file_net(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	struct net *net = seq_file_net(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) static void packet_seq_stop(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	__releases(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) static int packet_seq_show(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	if (v == SEQ_START_TOKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 		struct sock *s = sk_entry(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 		const struct packet_sock *po = pkt_sk(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 		seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 			   s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 			   refcount_read(&s->sk_refcnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 			   s->sk_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 			   ntohs(READ_ONCE(po->num)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 			   READ_ONCE(po->ifindex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 			   po->running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 			   atomic_read(&s->sk_rmem_alloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 			   sock_i_ino(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) }
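
/*
 * Resulting /proc/net/packet format (values illustrative; the socket
 * address is hashed by %pK):
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	00000000c90e3afb 3      3    0003   2     1 0      1000   21544
 */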
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) static const struct seq_operations packet_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	.start	= packet_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 	.next	= packet_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 	.stop	= packet_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	.show	= packet_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) static int __net_init packet_net_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 	mutex_init(&net->packet.sklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	INIT_HLIST_HEAD(&net->packet.sklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 			sizeof(struct seq_net_private)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) #endif /* CONFIG_PROC_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) static void __net_exit packet_net_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	remove_proc_entry("packet", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) static struct pernet_operations packet_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 	.init = packet_net_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	.exit = packet_net_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) static void __exit packet_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	unregister_netdevice_notifier(&packet_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	unregister_pernet_subsys(&packet_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 	sock_unregister(PF_PACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 	proto_unregister(&packet_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 
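/*
 * Registration order: the proto must exist before the socket family
 * that creates sockets of it, and the pernet subsystem (which sets up
 * each namespace's socket list) before the netdevice notifier that
 * walks those lists.  The error path unwinds in exact reverse order.
 */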
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) static int __init packet_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 	rc = proto_register(&packet_proto, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	rc = sock_register(&packet_family_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 		goto out_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 	rc = register_pernet_subsys(&packet_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 		goto out_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 	rc = register_netdevice_notifier(&packet_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 		goto out_pernet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) out_pernet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	unregister_pernet_subsys(&packet_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) out_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 	sock_unregister(PF_PACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) out_proto:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	proto_unregister(&packet_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) module_init(packet_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) module_exit(packet_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) MODULE_ALIAS_NETPROTO(PF_PACKET);