^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Kernel Connection Multiplexor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/errqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/in.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/rculist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <net/kcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <net/netns/generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <uapi/linux/kcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) unsigned int kcm_net_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static struct kmem_cache *kcm_psockp __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) static struct kmem_cache *kcm_muxp __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) static struct workqueue_struct *kcm_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static inline struct kcm_sock *kcm_sk(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) return (struct kcm_sock *)sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) return (struct kcm_tx_msg *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/* Report an unrecoverable error on the lower (transport) socket.
 *
 * NOTE(review): the @err argument is currently ignored -- sk_err is
 * always set to EPIPE regardless of what the caller passed.  This is
 * the long-standing behavior; confirm before propagating @err instead.
 */
static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	csk->sk_error_report(csk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/* Abort the transmit side of a psock after an unrecoverable TX error.
 *
 * Under the mux lock: marks the psock stopped for transmit, removes it
 * from the available list if it is unreserved, or (when @wakeup_kcm)
 * kicks the reserving KCM's tx_work so the failure is handled there.
 * Finally reports the error on the lower socket.  Idempotent: a psock
 * already stopped for TX is left untouched.
 */
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		/* Abort already in progress or done; nothing more to do */
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /* RX mux lock held. */
/* Fold the psock's strparser RX deltas (since the last update) into the
 * mux-wide RX stats, then snapshot the new saved counters.
 * RX mux lock held.
 */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.msgs;
	psock->saved_rx_bytes = psock->strp.stats.bytes;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/* Fold the psock's TX deltas (since the last update) into the mux-wide
 * TX stats, then snapshot the new saved counters.
 */
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	/* Nothing to do if already waiting, currently reserved to a psock,
	 * or receive has been disabled on this KCM.
	 */
	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	/* First drain messages parked on the mux-wide hold queue */
	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	/* Then take ready messages held on paused psocks */
	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	kcm->rx_wait = true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
/* skb destructor for skbs queued on a KCM socket's receive queue.
 *
 * Uncharges receive memory and, once the socket has drained below
 * sk_rcvlowat while neither waiting for nor reserved to a psock, marks
 * the KCM as ready to take more messages.
 */
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	/* Re-check readiness under the RX mux lock; the unlocked reads
	 * above are paired with the barrier in unreserve_rx_kcm.
	 */
	if (!kcm->rx_wait && !kcm->rx_psock &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
/* Charge an skb to a KCM socket and append it to its receive queue.
 *
 * Returns 0 on success; -ENOMEM if the socket is already at its
 * receive-buffer limit, or -ENOBUFS if memory cannot be scheduled.
 * On failure the skb is left untouched (caller retains ownership).
 */
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	/* Detach from the lower socket, then take ownership here so that
	 * kcm_rfree uncharges this socket when the skb is freed.
	 */
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called with a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			/* No KCM can take it now; park on the mux hold
			 * queue for a later kcm_rcv_ready.
			 */
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			/* Try the next waiting KCM for this same skb */
			goto try_again;
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /* Lower sock lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct sk_buff *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct kcm_mux *mux = psock->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct kcm_sock *kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) WARN_ON(psock->ready_rx_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (psock->rx_kcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return psock->rx_kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) spin_lock_bh(&mux->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (psock->rx_kcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) spin_unlock_bh(&mux->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) return psock->rx_kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) kcm_update_rx_mux_stats(mux, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) if (list_empty(&mux->kcm_rx_waiters)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) psock->ready_rx_msg = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) strp_pause(&psock->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) list_add_tail(&psock->psock_ready_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) &mux->psocks_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) spin_unlock_bh(&mux->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) kcm = list_first_entry(&mux->kcm_rx_waiters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct kcm_sock, wait_rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) list_del(&kcm->wait_rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) kcm->rx_wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) psock->rx_kcm = kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) kcm->rx_psock = psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) spin_unlock_bh(&mux->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) return kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static void kcm_done(struct kcm_sock *kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static void kcm_done_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) kcm_done(container_of(w, struct kcm_sock, done_work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) /* Lower sock held */
/* Release the KCM currently reserved to this psock for receive.
 * Lower sock held.
 *
 * If the KCM was marked done while reserved, finish teardown via a
 * work item.  If receive was disabled, its queued messages are handed
 * to other KCMs; otherwise the KCM may be re-added to the waiters list.
 */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	/* Nothing reserved */
	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	kcm->rx_psock = NULL;

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}
	spin_unlock_bh(&mux->rx_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
/* Lower sock lock held.
 *
 * sk_data_ready callback installed on the lower socket: forward the
 * notification to the strparser.  sk_user_data is read under the
 * callback lock so it cannot be torn down concurrently.
 */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
/* Called with lower sock held.
 *
 * strparser rcv_msg callback: deliver one complete parsed message to a
 * KCM socket.  Retries with another KCM if the chosen one's receive
 * buffer is full.
 */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct bpf_prog *prog = psock->bpf_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) res = bpf_prog_run_pin_on_cpu(prog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static int kcm_read_sock_done(struct strparser *strp, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) unreserve_rx_kcm(psock, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
/* sk_state_change callback on the lower socket: surface state changes
 * (e.g. half close) as an error so pollers notice.
 */
static void psock_state_change(struct sock *sk)
{
	/* TCP only does a EPOLLIN for a half close. Do a EPOLLHUP here
	 * since application will normally not poll with EPOLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
/* sk_write_space callback on the lower socket: when transmit space
 * frees up, kick the tx_work of the KCM (if any) that has this psock
 * reserved so it can resume sending.
 */
static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	/* psock may be torn down concurrently; sk_user_data is valid
	 * only under the callback lock.
	 */
	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved so someone is waiting for sending. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) static void unreserve_psock(struct kcm_sock *kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
/* Reserve a psock for this KCM socket to transmit on.
 * kcm sock is locked.
 *
 * Returns the reserved psock, or NULL if none is available -- in which
 * case the KCM is placed on the mux's tx waiters list (tx_wait set) and
 * will be handed a psock later via psock_now_avail.
 */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	/* Fast path: psock already reserved to this KCM */
	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if psock was reserved for this
	 * psock via psock_unreserve.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			/* No longer waiting; we got a psock */
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		/* Nothing available; queue as a tx waiter (only once) */
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) /* mux lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void psock_now_avail(struct kcm_psock *psock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct kcm_mux *mux = psock->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct kcm_sock *kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (list_empty(&mux->kcm_tx_waiters)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) list_add_tail(&psock->psock_avail_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) &mux->psocks_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) kcm = list_first_entry(&mux->kcm_tx_waiters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) struct kcm_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) wait_psock_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) list_del(&kcm->wait_psock_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) kcm->tx_wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) psock->tx_kcm = kcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /* Commit before changing tx_psock since that is read in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * reserve_psock before queuing work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) kcm->tx_psock = psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) KCM_STATS_INCR(psock->stats.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) queue_work(kcm_wq, &kcm->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) /* kcm sock is locked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) static void unreserve_psock(struct kcm_sock *kcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct kcm_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) struct kcm_mux *mux = kcm->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) spin_lock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) psock = kcm->tx_psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (WARN_ON(!psock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) smp_rmb(); /* Read tx_psock before tx_wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) kcm_update_tx_mux_stats(mux, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) WARN_ON(kcm->tx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) kcm->tx_psock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) psock->tx_kcm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) KCM_STATS_INCR(psock->stats.unreserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (unlikely(psock->tx_stopped)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (psock->done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* Deferred free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) list_del(&psock->psock_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) mux->psocks_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) sock_put(psock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) fput(psock->sk->sk_socket->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) kmem_cache_free(kcm_psockp, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) /* Don't put back on available list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) psock_now_avail(psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void kcm_report_tx_retry(struct kcm_sock *kcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct kcm_mux *mux = kcm->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) spin_lock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) KCM_STATS_INCR(mux->stats.tx_retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /* Write any messages ready on the kcm socket. Called with kcm sock lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * held. Return bytes actually sent or error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) static int kcm_write_msgs(struct kcm_sock *kcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct sock *sk = &kcm->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) struct kcm_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct sk_buff *skb, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) struct kcm_tx_msg *txm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) unsigned short fragidx, frag_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) unsigned int sent, total_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) kcm->tx_wait_more = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) psock = kcm->tx_psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (unlikely(psock && psock->tx_stopped)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) /* A reserved psock was aborted asynchronously. Unreserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * it and we'll retry the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) unreserve_psock(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) kcm_report_tx_retry(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (skb_queue_empty(&sk->sk_write_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) } else if (skb_queue_empty(&sk->sk_write_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) head = skb_peek(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) txm = kcm_tx_msg(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (txm->sent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* Send of first skbuff in queue already in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (WARN_ON(!psock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) sent = txm->sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) frag_offset = txm->frag_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) fragidx = txm->fragidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) skb = txm->frag_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) goto do_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) try_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) psock = reserve_psock(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (!psock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) skb = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) txm = kcm_tx_msg(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) do_frag_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) fragidx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) skb_frag_t *frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) frag_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) do_frag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) frag = &skb_shinfo(skb)->frags[fragidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (WARN_ON(!skb_frag_size(frag))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ret = kernel_sendpage(psock->sk->sk_socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) skb_frag_page(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) skb_frag_off(frag) + frag_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) skb_frag_size(frag) - frag_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /* Save state to try again when there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * write space on the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) txm->sent = sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) txm->frag_offset = frag_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) txm->fragidx = fragidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) txm->frag_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /* Hard failure in sending message, abort this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * psock since it has lost framing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * synchonization and retry sending the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * message from the beginning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) unreserve_psock(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) txm->sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) kcm_report_tx_retry(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) goto try_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) sent += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) frag_offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) KCM_STATS_ADD(psock->stats.tx_bytes, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (frag_offset < skb_frag_size(frag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* Not finished with this frag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) goto do_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (skb == head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (skb_has_frag_list(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) skb = skb_shinfo(skb)->frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) goto do_frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) } else if (skb->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) skb = skb->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) goto do_frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* Successfully sent the whole packet, account for it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) skb_dequeue(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) kfree_skb(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) sk->sk_wmem_queued -= sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) total_sent += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) KCM_STATS_INCR(psock->stats.tx_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) } while ((head = skb_peek(&sk->sk_write_queue)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (!head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* Done with all queued messages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) unreserve_psock(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /* Check if write space is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return total_sent ? : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static void kcm_tx_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct sock *sk = &kcm->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * aborts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) err = kcm_write_msgs(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /* Hard failure in write, report error on KCM socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) report_csk_error(&kcm->sk, -err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /* Primarily for SOCK_SEQPACKET sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (likely(sk->sk_socket) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static void kcm_push(struct kcm_sock *kcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (kcm->tx_wait_more)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) kcm_write_msgs(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int offset, size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct kcm_sock *kcm = kcm_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct sk_buff *skb = NULL, *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) bool eor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (flags & MSG_SENDPAGE_NOTLAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) flags |= MSG_MORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* No MSG_EOR from splice, only look at MSG_MORE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) eor = !(flags & MSG_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (sk->sk_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (kcm->seq_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Previously opened message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) head = kcm->seq_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) skb = kcm_tx_msg(head)->last_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) i = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (skb_can_coalesce(skb, i, page, offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) goto coalesced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (i >= MAX_SKB_FRAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct sk_buff *tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) tskb = alloc_skb(0, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) while (!tskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (head == skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) skb_shinfo(head)->frag_list = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) skb->next = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) skb = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Call the sk_stream functions to manage the sndbuf mem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (!sk_stream_memory_free(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) head = alloc_skb(0, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) while (!head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) skb = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) skb_fill_page_desc(skb, i, page, offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) coalesced:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) skb->len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) skb->data_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) skb->truesize += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) sk->sk_wmem_queued += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) sk_mem_charge(sk, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (head != skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) head->len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) head->data_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) head->truesize += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (eor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bool not_busy = skb_queue_empty(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Message complete, queue it on send buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) __skb_queue_tail(&sk->sk_write_queue, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) kcm->seq_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) KCM_STATS_INCR(kcm->stats.tx_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (flags & MSG_BATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) kcm->tx_wait_more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) } else if (kcm->tx_wait_more || not_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) err = kcm_write_msgs(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* We got a hard error in write_msgs but have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * already queued this message. Report an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * in the socket, but don't affect return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * from sendmsg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) pr_warn("KCM: Hard failure on kcm_write_msgs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) report_csk_error(&kcm->sk, -err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* Message not complete, save state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) kcm->seq_skb = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) kcm_tx_msg(head)->last_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) KCM_STATS_ADD(kcm->stats.tx_bytes, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) err = sk_stream_error(sk, flags, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* make sure we wake any epoll edge trigger waiter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct kcm_sock *kcm = kcm_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct sk_buff *skb = NULL, *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) size_t copy, copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int eor = (sock->type == SOCK_DGRAM) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* Per tcp_sendmsg this should be in poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (sk->sk_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (kcm->seq_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Previously opened message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) head = kcm->seq_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) skb = kcm_tx_msg(head)->last_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Call the sk_stream functions to manage the sndbuf mem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (!sk_stream_memory_free(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (msg_data_left(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* New message, alloc head skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) head = alloc_skb(0, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) while (!head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) head = alloc_skb(0, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) skb = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * csum_and_copy_from_iter from skb_do_copy_data_nocache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) while (msg_data_left(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) bool merge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int i = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct page_frag *pfrag = sk_page_frag(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!sk_page_frag_refill(sk, pfrag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!skb_can_coalesce(skb, i, pfrag->page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) pfrag->offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (i == MAX_SKB_FRAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct sk_buff *tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) tskb = alloc_skb(0, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (!tskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (head == skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) skb_shinfo(head)->frag_list = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) skb->next = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) skb = tskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) merge = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) copy = min_t(int, msg_data_left(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) pfrag->size - pfrag->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!sk_wmem_schedule(sk, copy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pfrag->page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) pfrag->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* Update the skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (merge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) skb_fill_page_desc(skb, i, pfrag->page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) pfrag->offset, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) get_page(pfrag->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) pfrag->offset += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) copied += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (head != skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) head->len += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) head->data_len += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) wait_for_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) err = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (eor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) bool not_busy = skb_queue_empty(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Message complete, queue it on send buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __skb_queue_tail(&sk->sk_write_queue, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) kcm->seq_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) KCM_STATS_INCR(kcm->stats.tx_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (msg->msg_flags & MSG_BATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) kcm->tx_wait_more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) } else if (kcm->tx_wait_more || not_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) err = kcm_write_msgs(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* We got a hard error in write_msgs but have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * already queued this message. Report an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * in the socket, but don't affect return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * from sendmsg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) pr_warn("KCM: Hard failure on kcm_write_msgs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) report_csk_error(&kcm->sk, -err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Message not complete, save state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) partial_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) kcm->seq_skb = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) kcm_tx_msg(head)->last_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) kcm_push(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (copied && sock->type == SOCK_SEQPACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Wrote some bytes before encountering an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * error, return partial success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto partial_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (head != kcm->seq_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) kfree_skb(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) err = sk_stream_error(sk, msg->msg_flags, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* make sure we wake any epoll edge trigger waiter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
/* Wait until at least one full message is available on the receive queue.
 *
 * Called with the kcm sock locked; sk_wait_data() drops and re-acquires
 * the sock lock while sleeping. On success returns the skb at the head of
 * sk->sk_receive_queue (peeked, NOT dequeued). Returns NULL with *err set
 * on socket error, non-blocking mode with no data, or a pending signal;
 * on SOCK_DONE (no more messages will arrive) returns NULL with *err left
 * untouched so callers report EOF.
 */
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		/* EOF: *err is not written, caller's initial value stands */
		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		sk_wait_data(sk, &timeo, NULL);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
/* Receive data from a KCM socket.
 *
 * Copies up to @len bytes from the message at the head of the receive
 * queue. The skb is unlinked and freed only once the whole message has
 * been consumed, and never under MSG_PEEK. A short read truncates the
 * message for SOCK_DGRAM (MSG_TRUNC); for other socket types the unread
 * remainder stays queued for a subsequent recvmsg.
 */
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	long timeo;
	struct strp_msg *stm;
	int copied = 0;
	struct sk_buff *skb;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	/* Clamp the copy to what remains of the current message */
	if (len > stm->full_len)
		len = stm->full_len;

	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < stm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			/* Partial read: consume only the copied bytes and
			 * leave the rest of the message on the queue.
			 */
			stm->offset += copied;
			stm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
			skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);
		}
	}

out:
	release_sock(sk);

	/* Bytes copied if any, otherwise the error (0 on EOF) */
	return copied ? : err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
/* Splice data from a KCM socket into a pipe.
 *
 * Splices up to @len bytes of the message at the head of the receive
 * queue and advances the message's offset/remaining-length accounting.
 * The skb is never removed here, even when fully drained, because splice
 * has no way to report MSG_EOR (see comment below).
 */
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	long timeo;
	struct strp_msg *stm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	/* Clamp to what remains of the current message */
	if (len > stm->full_len)
		len = stm->full_len;

	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	stm->offset += copied;
	stm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	release_sock(sk);

	return copied;

err_out:
	release_sock(sk);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
/* Stop delivering messages to this kcm socket; kcm sock lock held.
 *
 * Messages already queued on the socket are handed back to the mux so
 * other kcm sockets on the mux can receive them.
 */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			/* No longer waiting for data from the mux */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;
		}

		/* Give queued messages back to the mux for redistribution */
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
/* Re-enable message delivery to this kcm socket; kcm sock lock held.
 *
 * Clears the disabled flag and re-registers the socket as ready to
 * receive from the mux.
 */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static int kcm_setsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) sockptr_t optval, unsigned int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct kcm_sock *kcm = kcm_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int val, valbool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (level != SOL_KCM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (optlen < sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (copy_from_sockptr(&val, optval, sizeof(int)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) valbool = val ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) case KCM_RECV_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) lock_sock(&kcm->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (valbool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) kcm_recv_disable(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) kcm_recv_enable(kcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) release_sock(&kcm->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) err = -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int kcm_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) char __user *optval, int __user *optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct kcm_sock *kcm = kcm_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int val, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (level != SOL_KCM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (get_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) len = min_t(unsigned int, len, sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) case KCM_RECV_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) val = kcm->rx_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (put_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (copy_to_user(optval, &val, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
/* Initialize a newly created kcm socket and attach it to a mux.
 *
 * Assigns the lowest unused index among the mux's kcm sockets, inserts
 * the socket into the mux's list in index order, and registers it as
 * ready to receive messages.
 */
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * EPOLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	/* Find the lowest free index: the list is kept sorted by index,
	 * so the first position where tkcm->index != index is the gap to
	 * fill; head trails one entry behind as the insertion point.
	 */
	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	/* Mark ready to take receive messages from the mux */
	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static int kcm_attach(struct socket *sock, struct socket *csock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct kcm_sock *kcm = kcm_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct kcm_mux *mux = kcm->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct kcm_psock *psock = NULL, *tpsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static const struct strp_callbacks cb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) .rcv_msg = kcm_rcv_strparser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .parse_msg = kcm_parse_func_strparser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .read_sock_done = kcm_read_sock_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) csk = csock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (!csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) lock_sock(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /* Only allow TCP sockets to be attached for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) csk->sk_protocol != IPPROTO_TCP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /* Don't allow listeners or closed sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!psock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) psock->mux = mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) psock->sk = csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) psock->bpf_prog = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err = strp_init(&psock->strp, csk, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) kmem_cache_free(kcm_psockp, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) write_lock_bh(&csk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /* Check if sk_user_data is aready by KCM or someone else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * Must be done under lock to prevent race conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (csk->sk_user_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) write_unlock_bh(&csk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) strp_stop(&psock->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) strp_done(&psock->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) kmem_cache_free(kcm_psockp, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) psock->save_data_ready = csk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) psock->save_write_space = csk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) psock->save_state_change = csk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) csk->sk_user_data = psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) csk->sk_data_ready = psock_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) csk->sk_write_space = psock_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) csk->sk_state_change = psock_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) write_unlock_bh(&csk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) sock_hold(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Finished initialization, now add the psock to the MUX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) spin_lock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) head = &mux->psocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) list_for_each_entry(tpsock, &mux->psocks, psock_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (tpsock->index != index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) head = &tpsock->psock_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) list_add(&psock->psock_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) psock->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) KCM_STATS_INCR(mux->stats.psock_attach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) mux->psocks_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) psock_now_avail(psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* Schedule RX work in case there are already bytes queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) strp_check_rcv(&psock->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) release_sock(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct socket *csock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) csock = sockfd_lookup(info->fd, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (!csock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (IS_ERR(prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) err = PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) err = kcm_attach(sock, csock, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* Keep reference on file also */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) fput(csock->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
/* Detach a psock from its lower socket and from the MUX.
 *
 * Restores the lower socket's saved callbacks, stops the stream parser,
 * folds the psock's stats into the MUX aggregates, and removes it from
 * the MUX lists.  If the psock is currently reserved for transmit,
 * final cleanup is deferred to the kcm tx path via psock->done.
 */
static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		/* A kcm still has this psock reserved for receive; bail out
		 * rather than tearing it down underneath the reader.
		 */
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	/* Fold this psock's stats into the MUX aggregates before freeing */
	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in window mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		/* Not reserved: remove from avail/psock lists and free now */
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		/* Drop the sock and file references taken at attach time */
		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct kcm_sock *kcm = kcm_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct kcm_mux *mux = kcm->mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct kcm_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct socket *csock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) csock = sockfd_lookup(info->fd, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (!csock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) csk = csock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (!csk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) spin_lock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) list_for_each_entry(psock, &mux->psocks, psock_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (psock->sk != csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /* Found the matching psock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (psock->unattaching || WARN_ON(psock->done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) psock->unattaching = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /* Lower socket lock should already be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) kcm_unattach(psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) spin_unlock_bh(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) fput(csock->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
/* Protocol descriptor for KCM sockets; obj_size covers the embedding
 * struct kcm_sock so sk_alloc() allocates the whole thing.
 */
static struct proto kcm_proto = {
	.name	= "KCM",
	.owner	= THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /* Clone a kcm socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static struct file *kcm_clone(struct socket *osock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct socket *newsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct sock *newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) newsock = sock_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (!newsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return ERR_PTR(-ENFILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) newsock->type = osock->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) newsock->ops = osock->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) __module_get(newsock->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) &kcm_proto, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!newsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) sock_release(newsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) sock_init_data(newsock, newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
/* ioctl handler for KCM sockets.
 *
 * Supported commands:
 *   SIOCKCMATTACH   - attach a lower socket plus BPF parse program
 *   SIOCKCMUNATTACH - detach a previously attached lower socket
 *   SIOCKCMCLONE    - create another KCM socket on the same MUX and
 *                     return its new fd to userspace
 */
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct file *file;

		info.fd = get_unused_fd_flags(0);
		if (unlikely(info.fd < 0))
			return info.fd;

		file = kcm_clone(sock);
		if (IS_ERR(file)) {
			put_unused_fd(info.fd);
			return PTR_ERR(file);
		}
		/* Copy the fd out to userspace before fd_install(): once the
		 * file is installed the fd belongs to userspace and we could
		 * no longer back out cleanly on a copy failure.
		 */
		if (copy_to_user((void __user *)arg, &info,
				 sizeof(info))) {
			put_unused_fd(info.fd);
			fput(file);
			return -EFAULT;
		}
		fd_install(info.fd, file);
		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static void free_mux(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct kcm_mux *mux = container_of(rcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct kcm_mux, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) kmem_cache_free(kcm_muxp, mux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
/* Tear down a MUX after its last KCM socket is gone: unattach any
 * remaining psocks, drop held receive messages, fold the mux stats
 * into the per-netns aggregates, and free the mux after an RCU grace
 * period.
 */
static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	/* Every psock should have been removed by the loop above */
	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	/* Readers may still be walking the RCU mux list; defer the free */
	call_rcu(&mux->rcu, free_mux);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
/* Final teardown of a KCM socket: requeue pending receive messages to
 * sibling sockets, detach from the MUX, and release the MUX itself if
 * this was its last KCM socket.  If the socket is currently reserved
 * for receive, teardown is deferred to unreserve_rx_kcm().
 */
static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		kcm->rx_wait = false;
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	/* All receive data should have been requeued above */
	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	/* Drop the last reference on the kcm socket */
	sock_put(&kcm->sk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
/* Close a KCM socket (proto_ops .release).
 * If this is the last KCM socket on the MUX, the MUX is destroyed via
 * kcm_done()/release_mux().
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	sock_orphan(sk);
	/* Free any partially built message not yet queued for transmit */
	kfree_skb(kcm->seq_skb);

	lock_sock(sk);
	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling
	 * work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list, after this point there should be no
		 * way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
/* proto_ops for SOCK_DGRAM KCM sockets */
static const struct proto_ops kcm_dgram_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	kcm_sendpage,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
/* proto_ops for SOCK_SEQPACKET KCM sockets; same as the dgram ops but
 * additionally supports splice_read.
 */
static const struct proto_ops kcm_seqpacket_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	kcm_sendpage,
	.splice_read =	kcm_splice_read,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /* Create proto operation for kcm sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) static int kcm_create(struct net *net, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) int protocol, int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) struct kcm_net *knet = net_generic(net, kcm_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct kcm_mux *mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) switch (sock->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) case SOCK_DGRAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) sock->ops = &kcm_dgram_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) case SOCK_SEQPACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) sock->ops = &kcm_seqpacket_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return -ESOCKTNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (protocol != KCMPROTO_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* Allocate a kcm mux, shared between KCM sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (!mux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) sk_free(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) spin_lock_init(&mux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) spin_lock_init(&mux->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) INIT_LIST_HEAD(&mux->kcm_socks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) INIT_LIST_HEAD(&mux->kcm_rx_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) INIT_LIST_HEAD(&mux->kcm_tx_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) INIT_LIST_HEAD(&mux->psocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) INIT_LIST_HEAD(&mux->psocks_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) INIT_LIST_HEAD(&mux->psocks_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) mux->knet = knet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /* Add new MUX to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) mutex_lock(&knet->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) knet->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) mutex_unlock(&knet->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) skb_queue_head_init(&mux->rx_hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* Init KCM socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) sock_init_data(sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) init_kcm_sock(kcm_sk(sk), mux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) static const struct net_proto_family kcm_family_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) .family = PF_KCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) .create = kcm_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static __net_init int kcm_init_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct kcm_net *knet = net_generic(net, kcm_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) INIT_LIST_HEAD_RCU(&knet->mux_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) mutex_init(&knet->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static __net_exit void kcm_exit_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct kcm_net *knet = net_generic(net, kcm_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /* All KCM sockets should be closed at this point, which should mean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * that all multiplexors and psocks have been destroyed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) WARN_ON(!list_empty(&knet->mux_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) static struct pernet_operations kcm_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) .init = kcm_init_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) .exit = kcm_exit_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) .id = &kcm_net_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) .size = sizeof(struct kcm_net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) static int __init kcm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) kcm_muxp = kmem_cache_create("kcm_mux_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) sizeof(struct kcm_mux), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (!kcm_muxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) kcm_psockp = kmem_cache_create("kcm_psock_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) sizeof(struct kcm_psock), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (!kcm_psockp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) kcm_wq = create_singlethread_workqueue("kkcmd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (!kcm_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) err = proto_register(&kcm_proto, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) err = register_pernet_device(&kcm_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto net_ops_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) err = sock_register(&kcm_family_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) goto sock_register_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) err = kcm_proc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) goto proc_init_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) proc_init_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) sock_unregister(PF_KCM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) sock_register_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) unregister_pernet_device(&kcm_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) net_ops_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) proto_unregister(&kcm_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) kmem_cache_destroy(kcm_muxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) kmem_cache_destroy(kcm_psockp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (kcm_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) destroy_workqueue(kcm_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void __exit kcm_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) kcm_proc_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) sock_unregister(PF_KCM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) unregister_pernet_device(&kcm_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) proto_unregister(&kcm_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) destroy_workqueue(kcm_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) kmem_cache_destroy(kcm_muxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) kmem_cache_destroy(kcm_psockp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) module_init(kcm_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) module_exit(kcm_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) MODULE_ALIAS_NETPROTO(PF_KCM);