Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards.

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2) /* ar-skbuff.c: socket buffer destruction handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4)  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5)  * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <net/af_rxrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "ar-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) 
/* True if this skb is accounted as a transmit buffer (RXRPC_SKB_TX_BUFFER set
 * in the rxrpc private area's rx_flags).
 */
#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
/* Select the global Tx or Rx skb accounting counter that matches this skb. */
#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)  * Note the allocation or reception of a socket buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) 	int n = atomic_inc_return(select_skb_count(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) 	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) 			rxrpc_skb(skb)->rx_flags, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)  * Note the re-emergence of a socket buffer from a queue or buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) 		int n = atomic_read(select_skb_count(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) 				rxrpc_skb(skb)->rx_flags, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)  * Note the addition of a ref on a socket buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) 	int n = atomic_inc_return(select_skb_count(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) 	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) 			rxrpc_skb(skb)->rx_flags, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) 	skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)  * Note the dropping of a ref on a socket buffer by the core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) 	int n = atomic_inc_return(&rxrpc_n_rx_skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) 	trace_rxrpc_skb(skb, op, 0, n, 0, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)  * Note the destruction of a socket buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) 		int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) 		CHECK_SLAB_OKAY(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) 		n = atomic_dec_return(select_skb_count(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) 				rxrpc_skb(skb)->rx_flags, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)  * Clear a queue of socket buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) void rxrpc_purge_queue(struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) 	const void *here = __builtin_return_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) 	while ((skb = skb_dequeue((list))) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) 		int n = atomic_dec_return(select_skb_count(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) 		trace_rxrpc_skb(skb, rxrpc_skb_purged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) 				refcount_read(&skb->users), n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) 				rxrpc_skb(skb)->rx_flags, here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }