Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

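/* Copy data queued on the psock ingress list into the iov of @msg, up to
 * @len bytes. Unless MSG_PEEK is set, consumed scatterlist entries are
 * advanced, socket memory is uncharged (unless the data is backed by an
 * skb), exhausted pages are released and fully drained messages are
 * unlinked and freed. Returns the number of bytes copied. Called with
 * the socket lock held.
 */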
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
		      struct msghdr *msg, int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = list_first_entry_or_null(&psock->ingress_msg,
					  struct sk_msg, list);

	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			if (msg_rx == list_last_entry(&psock->ingress_msg,
						      struct sk_msg, list))
				break;
			msg_rx = list_next_entry(msg_rx, list);
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			list_del(&msg_rx->list);
			if (msg_rx->skb)
				consume_skb(msg_rx->skb);
			kfree(msg_rx);
		}
		msg_rx = list_first_entry_or_null(&psock->ingress_msg,
						  struct sk_msg, list);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);

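/* Requeue up to @apply_bytes of @msg onto the psock's own ingress list:
 * the scatterlist entries are transferred into a freshly allocated
 * sk_msg, the socket is charged for the moved bytes, and any reader
 * waiting on @sk is woken via sk_psock_data_ready(). This is the
 * ingress half of a redirect verdict (see tcp_bpf_sendmsg_redir()).
 */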
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

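/* Transmit up to @apply_bytes of @msg through the TCP stack, walking the
 * scatterlist from sg.start. When a TLS TX ULP is active, the data goes
 * out through kernel_sendpage_locked() with MSG_SENDPAGE_NOPOLICY so the
 * BPF policy is not re-applied to it; otherwise do_tcp_sendpages() is
 * used. Partial sends are retried, fully sent pages are released, and
 * @uncharge selects whether socket memory accounting is released as
 * bytes go out. Called with the socket lock held.
 */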
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off  = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off  += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

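/* Redirect-verdict entry point: deliver @bytes of @msg to the target
 * socket @sk, either onto its psock ingress queue when the verdict asked
 * for ingress delivery (BPF_F_INGRESS), or out through its TCP stack.
 * Returns -EPIPE if the target no longer has a psock attached.
 */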
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock))
		return -EPIPE;

	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_STREAM_PARSER
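/* Installed as ->stream_memory_read so that poll()/select() report
 * readable data when the psock ingress list is non-empty, since
 * BPF-redirected data never lands on sk_receive_queue.
 */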
static bool tcp_bpf_stream_read(const struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}

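/* Sleep until the psock ingress list or sk_receive_queue has data, the
 * socket is shut down for receive, or @timeo expires. Returns nonzero
 * when there is something to read.
 */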
static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
			     int flags, long timeo, int *err)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

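/* recvmsg() replacement installed for psock-enabled sockets. Plain
 * tcp_recvmsg() still handles the fast paths: no psock attached, or skb
 * data queued while the psock ingress list is empty. Otherwise the
 * ingress list is drained, waiting via tcp_bpf_wait_data() when nothing
 * is queued yet.
 */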
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		int data, err = 0;
		long timeo;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		if (err) {
			ret = err;
			goto out;
		}
		copied = -EAGAIN;
	}
	ret = copied;
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

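/* Run the BPF msg verdict prog (once per apply_bytes window) and act on
 * its result: __SK_PASS pushes the data out of @sk, __SK_REDIRECT drops
 * the socket lock and forwards the data to the verdict's target socket,
 * and __SK_DROP frees it and returns -EACCES. While cork_bytes remain
 * outstanding, the data is parked on psock->cork instead of being sent.
 */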
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	u32 eval = __SK_NONE;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track the delta in msg size to add/subtract it on SK_DROP
		 * from the copied size returned to user space. This ensures
		 * the user doesn't get a positive return code with
		 * msg_cut_data and an SK_DROP verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, msg->sg.size);
		release_sock(sk);

		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, msg->sg.size);
			goto more_data;
		}
	}
	return ret;
}

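/* sendmsg() replacement: copy user data into an sk_msg scatterlist
 * (growing psock->cork while corking is in progress) and hand it to
 * tcp_bpf_send_verdict(). Waits for socket buffer space like regular
 * TCP sendmsg and falls back to tcp_sendmsg() when no psock is
 * attached.
 */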
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

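/* sendpage() replacement: append the page to an sk_msg (or to
 * psock->cork while corking) and run the verdict over it via
 * tcp_bpf_send_verdict(); falls back to tcp_sendpage() when no psock is
 * attached.
 */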
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

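/* Build the psock variants of the TCP proto ops from @base:
 * TCP_BPF_BASE overrides close/recvmsg/stream_memory_read, and
 * TCP_BPF_TX additionally overrides sendmsg/sendpage for sockets that
 * have a msg parser prog attached.
 */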
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE]			= *base;
	prot[TCP_BPF_BASE].close		= sock_map_close;
	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].stream_memory_read	= tcp_bpf_stream_read;

	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage		= tcp_bpf_sendpage;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg  == tcp_recvmsg &&
	       ops->sendmsg  == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

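/* Pick the proto variant for @sk: IPv4 vs IPv6, and BASE vs TX
 * depending on whether a msg_parser prog is attached. The IPv6 table is
 * built lazily from the socket's current proto ops, after verifying
 * those ops are the stock TCP ones we know how to wrap.
 */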
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser   ? TCP_BPF_TX   : TCP_BPF_BASE;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return ERR_PTR(-EINVAL);

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	return &tcp_bpf_prots[family][config];
}

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_STREAM_PARSER */