Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3:	Implementation of the ICMP protocol layer.
 *
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *	Some of the function names and the icmp unreach table for this
 *	module were derived from [icmp.c 1.0.11 06/02/93] by
 *	Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
 *	Other than that this module is a complete rewrite.
 *
 *	Fixes:
 *	Clemens Fruhwirth	:	introduce global icmp rate limiting
 *					with icmp type masking ability instead
 *					of broken per type icmp timeouts.
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Multicast ping reply as self.
 *		Alan Cox	:	Fix atomicity lockup in ip_build_xmit
 *					call.
 *		Alan Cox	:	Added 216,128 byte paths to the MTU
 *					code.
 *		Martin Mares	:	RFC1812 checks.
 *		Martin Mares	:	Can be configured to follow redirects
 *					if acting as a router _without_ a
 *					routing protocol (RFC 1812).
 *		Martin Mares	:	Echo requests may be configured to
 *					be ignored (RFC 1812).
 *		Martin Mares	:	Limitation of ICMP error message
 *					transmit rate (RFC 1812).
 *		Martin Mares	:	TOS and Precedence set correctly
 *					(RFC 1812).
 *		Martin Mares	:	Now copying as much data from the
 *					original packet as we can without
 *					exceeding 576 bytes (RFC 1812).
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Keith Owens	:	RFC1191 correction for 4.2BSD based
 *					path MTU bug.
 *		Thomas Quinot	:	ICMP Dest Unreach codes up to 15 are
 *					valid (RFC 1812).
 *		Andi Kleen	:	Check all packet lengths properly
 *					and moved all kfree_skb() up to
 *					icmp_rcv.
 *		Andi Kleen	:	Move the rate limit bookkeeping
 *					into the dest entry and use a token
 *					bucket filter (thanks to ANK). Make
 *					the rates sysctl configurable.
 *		Yu Tianli	:	Fixed two ugly bugs in icmp_send
 *					- IP option length was accounted wrongly
 *					- ICMP header length was not accounted
 *					  at all.
 *              Tristan Greaves :       Added sysctl option to ignore bogus
 *              			broadcast responses from broken routers.
 *
 * To Fix:
 *
 *	- Should use skb_pull() instead of all the manual checking.
 *	  This would also greatly simplify some upper layer error handlers. --AK
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>

/*
 *	Build xmit assembly blocks
 */

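/* A note on the scratch area below: data.times[] only comes into play when
 * the reply being assembled is an ICMP timestamp reply (originate, receive
 * and transmit stamps); for every other message only data.icmph is used and
 * head_len stays at sizeof(struct icmphdr).
 */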
struct icmp_bxm {
	struct sk_buff *skb;
	int offset;
	int data_len;

	struct {
		struct icmphdr icmph;
		__be32	       times[3];
	} data;
	int head_len;
	struct ip_options_data replyopts;
};

/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */

const struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT	/* ICMP_PROT_UNREACH */,
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_ANO	*/
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);

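/*
 * Protocol error handlers index this table by the ICMP_DEST_UNREACH code
 * they receive.  Roughly, the pattern used by handlers such as tcp_v4_err()
 * looks like:
 *
 *	if (code > NR_ICMP_UNREACH)
 *		return;
 *	err   = icmp_err_convert[code].errno;
 *	fatal = icmp_err_convert[code].fatal;	(abort vs. report a soft error)
 */
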
/*
 *	ICMP control array. This specifies what to do with each ICMP.
 */

struct icmp_control {
	bool (*handler)(struct sk_buff *skb);
	short   error;		/* This ICMP is classed as an error message */
};

static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];

/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static struct sock *icmp_sk(struct net *net)
{
	return this_cpu_read(*net->ipv4.icmp_sk);
}

/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = icmp_sk(net);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		return NULL;
	}
	return sk;
}

static inline void icmp_xmit_unlock(struct sock *sk)
{
	spin_unlock(&sk->sk_lock.slock);
}

int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;

static struct {
	spinlock_t	lock;
	u32		credit;
	u32		stamp;
} icmp_global = {
	.lock		= __SPIN_LOCK_UNLOCKED(icmp_global.lock),
};

/**
 * icmp_global_allow - Are we allowed to send one more ICMP message ?
 *
 * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
 * Returns false if we reached the limit and can not send another packet.
 * Note: called with BH disabled
 */
bool icmp_global_allow(void)
{
	u32 credit, delta, incr = 0, now = (u32)jiffies;
	bool rc = false;

	/* Check if token bucket is empty and cannot be refilled
	 * without taking the spinlock. The READ_ONCE() are paired
	 * with the following WRITE_ONCE() in this same function.
	 */
	if (!READ_ONCE(icmp_global.credit)) {
		delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
		if (delta < HZ / 50)
			return false;
	}

	spin_lock(&icmp_global.lock);
	delta = min_t(u32, now - icmp_global.stamp, HZ);
	if (delta >= HZ / 50) {
		incr = sysctl_icmp_msgs_per_sec * delta / HZ;
		if (incr)
			WRITE_ONCE(icmp_global.stamp, now);
	}
	credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
	if (credit) {
		/* We want to use a credit of one in average, but need to randomize
		 * it for security reasons.
		 */
		credit = max_t(int, credit - prandom_u32_max(3), 0);
		rc = true;
	}
	WRITE_ONCE(icmp_global.credit, credit);
	spin_unlock(&icmp_global.lock);
	return rc;
}
EXPORT_SYMBOL(icmp_global_allow);

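/*
 * Worked example of the refill above, assuming HZ=250 and the defaults
 * sysctl_icmp_msgs_per_sec=1000, sysctl_icmp_msgs_burst=50: after a quiet
 * spell of 5 jiffies (20ms, the HZ/50 minimum) delta=5, so
 * incr = 1000 * 5 / 250 = 20 new credits; a full second of silence clamps
 * delta to HZ and would yield 1000 credits, which min_t() caps at the burst
 * size of 50.  Each message that is allowed then pays a randomized 0..2
 * credits (prandom_u32_max(3)), i.e. one credit on average.
 */
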
static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
	if (type > NR_ICMP_TYPES)
		return true;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		return true;

	/* Limit if icmp type is enabled in ratemask. */
	if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
		return true;

	return false;
}

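/*
 * Example: with the usual icmp_ratemask default of 6168 (0x1818) only bits
 * 3, 4, 11 and 12 are set, so destination unreachable, source quench, time
 * exceeded and parameter problem are subject to rate limiting, while every
 * other type returns true above and bypasses both the global and the
 * per-destination limiter.
 */
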
static bool icmpv4_global_allow(struct net *net, int type, int code)
{
	if (icmpv4_mask_allow(net, type, code))
		return true;

	if (icmp_global_allow())
		return true;

	return false;
}

/*
 *	Send an ICMP frame.
 */

static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
			       struct flowi4 *fl4, int type, int code)
{
	struct dst_entry *dst = &rt->dst;
	struct inet_peer *peer;
	bool rc = true;
	int vif;

	if (icmpv4_mask_allow(net, type, code))
		goto out;

	/* No rate limit on loopback */
	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
		goto out;

	vif = l3mdev_master_ifindex(dst->dev);
	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
	if (peer)
		inet_putpeer(peer);
out:
	return rc;
}

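/*
 * The per-destination check above is a second, independent token bucket
 * kept with the inet_peer entry for fl4->daddr; with the default
 * icmp_ratelimit of 1000 (milliseconds, stored as jiffies internally) a
 * given peer is answered roughly once per second once its small burst
 * allowance has been spent.
 */
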
/*
 *	Maintain the counters used in the SNMP statistics for outgoing ICMP
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}

/*
 *	Checksum each fragment, and on the first include the headers and final
 *	checksum.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}

static void icmp_push_reply(struct icmp_bxm *icmp_param,
			    struct flowi4 *fl4,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sock *sk;
	struct sk_buff *skb;

	sk = icmp_sk(dev_net((*rt)->dst.dev));
	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum;
		struct sk_buff *skb1;

		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len);
		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, fl4);
	}
}

/*
 *	Driving logic for building and sending ICMP messages.
 */

static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct flowi4 fl4;
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr, saddr;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);
	int type = icmp_param->data.icmph.type;
	int code = icmp_param->data.icmph.code;

	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
		return;

	/* Needed by both icmp_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* global icmp_msgs_per_sec */
	if (!icmpv4_global_allow(net, type, code))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	ipcm_init(&ipc);
	inet->tos = ip_hdr(skb)->tos;
	ipc.sockc.mark = mark;
	daddr = ipc.addr = ip_hdr(skb)->saddr;
	saddr = fib_compute_spec_dst(skb);

	if (icmp_param->replyopts.opt.opt.optlen) {
		ipc.opt = &icmp_param->replyopts.opt;
		if (ipc.opt->opt.srr)
			daddr = icmp_param->replyopts.opt.opt.faddr;
	}
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
	fl4.flowi4_proto = IPPROTO_ICMP;
	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		goto out_unlock;
	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}

/*
 * The device used for looking up which routing table to use for sending an ICMP
 * error is preferably the source whenever it is set, which should ensure the
 * icmp error can be sent to the source host, else lookup using the routing
 * table of the destination device, else use the main routing table (index 0).
 */
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
	struct net_device *route_lookup_dev = NULL;

	if (skb->dev)
		route_lookup_dev = skb->dev;
	else if (skb_dst(skb))
		route_lookup_dev = skb_dst(skb)->dev;
	return route_lookup_dev;
}

static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
					const struct iphdr *iph,
					__be32 saddr, u8 tos, u32 mark,
					int type, int code,
					struct icmp_bxm *param)
{
	struct net_device *route_lookup_dev;
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = (param->replyopts.opt.opt.srr ?
		      param->replyopts.opt.opt.faddr : iph->saddr);
	fl4->saddr = saddr;
	fl4->flowi4_mark = mark;
	fl4->flowi4_uid = sock_net_uid(net, NULL);
	fl4->flowi4_tos = RT_TOS(tos);
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
	fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);

	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
	rt = ip_route_output_key_hash(net, fl4, skb_in);
	if (IS_ERR(rt))
		return rt;

	/* No need to clone since we're just using its address. */
	rt2 = rt;

	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
					   flowi4_to_flowi(fl4), NULL, 0);
	if (!IS_ERR(rt)) {
		if (rt != rt2)
			return rt;
	} else if (PTR_ERR(rt) == -EPERM) {
		rt = NULL;
	} else
		return rt;

	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
	if (err)
		goto relookup_failed;

	if (inet_addr_type_dev_table(net, route_lookup_dev,
				     fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
			err = PTR_ERR(rt2);
	} else {
		struct flowi4 fl4_2 = {};
		unsigned long orefdst;

		fl4_2.daddr = fl4_dec.saddr;
		rt2 = ip_route_output_key(net, &fl4_2);
		if (IS_ERR(rt2)) {
			err = PTR_ERR(rt2);
			goto relookup_failed;
		}
		/* Ugh! */
		orefdst = skb_in->_skb_refdst; /* save old refdst */
		skb_dst_set(skb_in, NULL);
		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
				     RT_TOS(tos), rt2->dst.dev);

		dst_release(&rt2->dst);
		rt2 = skb_rtable(skb_in);
		skb_in->_skb_refdst = orefdst; /* restore old refdst */
	}

	if (err)
		goto relookup_failed;

	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
					    flowi4_to_flowi(&fl4_dec), NULL,
					    XFRM_LOOKUP_ICMP);
	if (!IS_ERR(rt2)) {
		dst_release(&rt->dst);
		memcpy(fl4, &fl4_dec, sizeof(*fl4));
		rt = rt2;
	} else if (PTR_ERR(rt2) == -EPERM) {
		if (rt)
			dst_release(&rt->dst);
		return rt2;
	} else {
		err = PTR_ERR(rt2);
		goto relookup_failed;
	}
	return rt;

relookup_failed:
	if (rt)
		return rt;
	return ERR_PTR(err);
}

/*
 *	Send an ICMP message in response to a situation
 *
 *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
 *		  MAY send more (we do).
 *			MUST NOT change this header information.
 *			MUST NOT reply to a multicast/broadcast IP address.
 *			MUST NOT reply to a multicast/broadcast MAC address.
 *			MUST reply to only the first fragment.
 */

void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
		 const struct ip_options *opt)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	__be32 saddr;
	u8  tos;
	u32 mark;
	struct net *net;
	struct sock *sk;

	if (!rt)
		goto out;

	if (rt->dst.dev)
		net = dev_net(rt->dst.dev);
	else if (skb_in->dev)
		net = dev_net(skb_in->dev);
	else
		goto out;

	/*
	 *	Find the original header. It is expected to be valid, of course.
	 *	Check this, icmp_send is called from the most obscure devices
	 *	sometimes.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(*iph)) >
	    skb_tail_pointer(skb_in))
		goto out;

	/*
	 *	No replies to physical multicast/broadcast
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Now check at the protocol level
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we send an ICMP error to an ICMP error a mess would result..
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an
		 *	ICMP error
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (!itp)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error. This
			 *	isn't specified by the RFC, but think about it..
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* Needed by both icmp_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* Check the global sysctl_icmp_msgs_per_sec ratelimit, unless the
	 * incoming dev is loopback.  If the outgoing dev changes to not be
	 * loopback, the peer ratelimit still works (in icmpv4_xrlim_allow).
	 */
	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
	      !icmpv4_global_allow(net, type, code))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	/*
	 *	Construct source address and options.
	 */

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));

		if (dev)
			saddr = inet_select_addr(dev, iph->saddr,
						 RT_SCOPE_LINK);
		else
			saddr = 0;
		rcu_read_unlock();
	}

	tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) |
					   IPTOS_PREC_INTERNETCONTROL) :
					   iph->tos;
	mark = IP4_REPLY_MARK(net, skb_in->mark);

	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
		goto out_unlock;


	/*
	 *	Prepare data for ICMP header.
	 */

	icmp_param.data.icmph.type	 = type;
	icmp_param.data.icmph.code	 = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum	 = 0;
	icmp_param.skb	  = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	ipcm_init(&ipc);
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts.opt;
	ipc.sockc.mark = mark;

	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
			       type, code, &icmp_param);
	if (IS_ERR(rt))
		goto out_unlock;

	/* peer icmp_ratelimit */
	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
	room -= sizeof(struct icmphdr);
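	/* e.g. with a path MTU of at least 576 and no IP options this leaves
	 * 576 - 20 (iphdr) - 8 (icmphdr) = 548 bytes for quoting the
	 * offending datagram.
	 */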

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	/* if we don't have a source address at this point, fall back to the
	 * dummy address instead of sending out a packet with a source address
	 * of 0.0.0.0
	 */
	if (!fl4.saddr)
		fl4.saddr = htonl(INADDR_DUMMY);

	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
out:;
}
EXPORT_SYMBOL(__icmp_send);

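/*
 * Most callers go through icmp_send(), the thin wrapper in
 * include/net/icmp.h that passes the options already parsed into
 * IPCB(skb_in).  A typical call site, as used on the forwarding path when a
 * packet exceeds the egress MTU, looks roughly like:
 *
 *	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 */
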
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_conntrack.h>
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct sk_buff *cloned_skb = NULL;
	struct ip_options opts = { 0 };
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	__be32 orig_ip;

	ct = nf_ct_get(skb_in, &ctinfo);
	if (!ct || !(ct->status & IPS_SRC_NAT)) {
		__icmp_send(skb_in, type, code, info, &opts);
		return;
	}

	if (skb_shared(skb_in))
		skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);

	if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(struct iphdr)) >
	    skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
	    skb_network_offset(skb_in) + sizeof(struct iphdr))))
		goto out;

	orig_ip = ip_hdr(skb_in)->saddr;
	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
	__icmp_send(skb_in, type, code, info, &opts);
	ip_hdr(skb_in)->saddr = orig_ip;
out:
	consume_skb(cloned_skb);
}
EXPORT_SYMBOL(icmp_ndo_send);
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
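/* Hand an ICMP error to the owner of the embedded datagram: notify any
 * matching raw sockets, then the err_handler of the upper-layer protocol
 * named in the quoted IP header.
 */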
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	const struct iphdr *iph = (const struct iphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	const struct net_protocol *ipprot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	int protocol = iph->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* Check the full IP header plus 8 bytes of protocol to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	 * avoid additional coding at protocol handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	raw_icmp_error(skb, protocol, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	ipprot = rcu_dereference(inet_protos[protocol]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (ipprot && ipprot->err_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		ipprot->err_handler(skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
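/* Does the upper-layer protocol of the quoted packet ask for strict
 * validation of ICMP_FRAG_NEEDED? Consulted for the ip_no_pmtu_disc == 3
 * case in icmp_unreach() below.
 */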
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static bool icmp_tag_validation(int proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	bool ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	return ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  *	ICMP_PARAMETERPROB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) static bool icmp_unreach(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	const struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	struct icmphdr *icmph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	u32 info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	net = dev_net(skb_dst(skb)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	 *	Incomplete header?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	 *	Only checks for the IP header; there should be an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 *	additional check for longer headers in upper levels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	icmph = icmp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	iph   = (const struct iphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (iph->ihl < 5) /* Mangled header, drop. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	switch (icmph->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	case ICMP_DEST_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		switch (icmph->code & 15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		case ICMP_NET_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		case ICMP_HOST_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		case ICMP_PROT_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		case ICMP_PORT_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		case ICMP_FRAG_NEEDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			/* for documentation of the ip_no_pmtu_disc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			 * values please see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			 * Documentation/networking/ip-sysctl.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 						    &iph->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				if (!icmp_tag_validation(iph->protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 				info = ntohs(icmph->un.frag.mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		case ICMP_SR_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			net_dbg_ratelimited("%pI4: Source Route Failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 					    &iph->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		if (icmph->code > NR_ICMP_UNREACH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	case ICMP_PARAMETERPROB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		info = ntohl(icmph->un.gateway) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	case ICMP_TIME_EXCEEDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		if (icmph->code == ICMP_EXC_FRAGTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 *	Throw it at our lower layers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 *		  header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 *		  transport layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 *		  transport layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 *	Check the other end isn't violating RFC 1122. Some routers send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 *	bogus responses to broadcast frames. If you see this message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 *	bogus responses to broadcast frames. If you see this message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 *	first check that your netmask matches at both ends; if it does,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 				     &ip_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				     icmph->type, icmph->code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				     &iph->daddr, skb->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	icmp_socket_deliver(skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  *	Handle ICMP_REDIRECT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static bool icmp_redirect(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (skb->len < sizeof(struct iphdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		/* there ought to be a stat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  *	Handle ICMP_ECHO ("ping") requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  *		  requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  *		  included in the reply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  *		  echo requests, MUST have default=NOT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  *	See also WRT handling of options once they are done and working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) static bool icmp_echo(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	net = dev_net(skb_dst(skb)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		struct icmp_bxm icmp_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		icmp_param.data.icmph	   = *icmp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		icmp_param.skb		   = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		icmp_param.offset	   = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		icmp_param.data_len	   = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		icmp_param.head_len	   = sizeof(struct icmphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		icmp_reply(&icmp_param, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/* should there be an ICMP stat for ignored echoes? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  *	Handle ICMP Timestamp requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *	RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  *		  SHOULD be in the kernel for minimum random latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  *		  MUST be accurate to a few minutes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *		  MUST be updated at least at 15Hz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static bool icmp_timestamp(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct icmp_bxm icmp_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 *	Too short.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (skb->len < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 *	Fill in the current time as ms since midnight UT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	icmp_param.data.times[1] = inet_current_timestamp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	icmp_param.data.times[2] = icmp_param.data.times[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));
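	/* times[0] holds the originate timestamp copied from the request;
	 * all three 32-bit timestamps travel right after the ICMP header
	 * (head_len below is icmphdr plus 12 bytes, data_len is 0).
	 */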
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	icmp_param.data.icmph	   = *icmp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	icmp_param.data.icmph.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	icmp_param.skb		   = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	icmp_param.offset	   = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	icmp_param.data_len	   = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	icmp_param.head_len	   = sizeof(struct icmphdr) + 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	icmp_reply(&icmp_param, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static bool icmp_discard(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* pretend it was a success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  *	Deal with incoming ICMP packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int icmp_rcv(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct icmphdr *icmph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct rtable *rt = skb_rtable(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct net *net = dev_net(rt->dst.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	bool success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
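	/* Packets rejected by the normal IPsec policy check may still be
	 * accepted when the matching state allows ICMP (XFRM_STATE_ICMP):
	 * the reverse policy check is then run against the IP header
	 * embedded in the ICMP payload, and the outer network header
	 * offset is restored afterwards.
	 */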
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		struct sec_path *sp = skb_sec_path(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		int nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				 XFRM_STATE_ICMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		nh = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		skb_set_network_header(skb, sizeof(*icmph));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		skb_set_network_header(skb, nh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (skb_checksum_simple_validate(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	if (!pskb_pull(skb, sizeof(*icmph)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	icmph = icmp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	ICMPMSGIN_INC_STATS(net, icmph->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	 *	RFC 1122: 3.2.2  Unknown ICMP messages types MUST be silently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	 *		  discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (icmph->type > NR_ICMP_TYPES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 *	Parse the ICMP message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 *	  silently ignored (we let user decide with a sysctl).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 *	  discarded if to broadcast/multicast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if ((icmph->type == ICMP_ECHO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		     icmph->type == ICMP_TIMESTAMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		if (icmph->type != ICMP_ECHO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		    icmph->type != ICMP_TIMESTAMP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		    icmph->type != ICMP_ADDRESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		    icmph->type != ICMP_ADDRESSREPLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	success = icmp_pointers[icmph->type].handler(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (success)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return NET_RX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) csum_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
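/* Validate an RFC 4884 extension structure: the version must be 2, a
 * non-zero checksum must verify over the whole extension area, and every
 * object header must carry a length that is at least the header size and
 * does not run past the end of the packet.
 */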
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	struct icmp_extobj_hdr *objh, _objh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	struct icmp_ext_hdr *exth, _exth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	u16 olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (!exth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (exth->version != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (exth->checksum &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	    csum_fold(skb_checksum(skb, off, skb->len - off, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	off += sizeof(_exth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	while (off < skb->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		if (!objh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		olen = ntohs(objh->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		if (olen < sizeof(_objh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		off += olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		if (off > skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) void ip_icmp_error_rfc4884(const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			   struct sock_ee_data_rfc4884 *out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			   int thlen, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	int hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	/* original datagram headers: end of icmph to payload (skb->data) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	hlen = -skb_transport_offset(skb) - thlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	/* per rfc 4884: minimal datagram length of 128 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (off < 128 || off < hlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	/* kernel has stripped headers: return payload offset in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	off -= hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (off + sizeof(struct icmp_ext_hdr) > skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	out->len = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (!ip_icmp_error_rfc4884_validate(skb, off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		out->flags |= SO_EE_RFC4884_FLAG_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) int icmp_err(struct sk_buff *skb, u32 info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct iphdr *iph = (struct iphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	int offset = iph->ihl<<2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	int type = icmp_hdr(skb)->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	int code = icmp_hdr(skb)->code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	 * Use ping_err to handle all icmp errors except those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	 * triggered by an ICMP_ECHOREPLY sent from the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (icmph->type != ICMP_ECHOREPLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		ping_err(skb, offset, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	else if (type == ICMP_REDIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  *	This table is the definition of how we handle ICMP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  */
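/* Types with no dedicated handler here (reserved or unimplemented ones such
 * as router advertisement/solicitation, 9 and 10) are silently dropped via
 * icmp_discard(). .error marks types that are themselves error reports, so
 * that no ICMP error is generated in response to them.
 */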
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	[ICMP_ECHOREPLY] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		.handler = ping_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	[1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	[2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	[ICMP_DEST_UNREACH] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		.handler = icmp_unreach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	[ICMP_SOURCE_QUENCH] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		.handler = icmp_unreach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	[ICMP_REDIRECT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		.handler = icmp_redirect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	[6] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	[7] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	[ICMP_ECHO] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		.handler = icmp_echo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	[9] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	[10] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	[ICMP_TIME_EXCEEDED] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		.handler = icmp_unreach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	[ICMP_PARAMETERPROB] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		.handler = icmp_unreach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		.error = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	[ICMP_TIMESTAMP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		.handler = icmp_timestamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	[ICMP_TIMESTAMPREPLY] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	[ICMP_INFO_REQUEST] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	[ICMP_INFO_REPLY] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	[ICMP_ADDRESS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	[ICMP_ADDRESSREPLY] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		.handler = icmp_discard,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void __net_exit icmp_sk_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	free_percpu(net->ipv4.icmp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	net->ipv4.icmp_sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static int __net_init icmp_sk_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	net->ipv4.icmp_sk = alloc_percpu(struct sock *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (!net->ipv4.icmp_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
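	/* One raw control socket per possible CPU: the transmit path grabs
	 * the local CPU's socket (see icmp_xmit_lock()), so senders on
	 * different CPUs never contend for the same socket.
	 */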
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		err = inet_ctl_sock_create(&sk, PF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 					   SOCK_RAW, IPPROTO_ICMP, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		*per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		/* Enough space for 2 64K ICMP packets, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		 * sk_buff/skb_shared_info struct overhead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		sk->sk_sndbuf =	2 * SKB_TRUESIZE(64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		 * Speed up sock_wfree()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* Control parameters for ECHO replies. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	/* Control parameter - ignore bogus broadcast responses? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 * 	Configurable global rate limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 *	ratelimit defines tokens/packet consumed for dst->rate_token
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 *	bucket; ratemask defines which icmp types are ratelimited by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 *	setting its bit position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 *	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 *	dest unreachable (3), source quench (4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 *	time exceeded (11), parameter problem (12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	net->ipv4.sysctl_icmp_ratemask = 0x1818;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	icmp_sk_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static struct pernet_operations __net_initdata icmp_sk_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	.init = icmp_sk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	.exit = icmp_sk_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) int __init icmp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	return register_pernet_subsys(&icmp_sk_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }