// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	  Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	  Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge amount
 *					of sockets being hashed (this is for
 *					unix_gc() performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *	  Artur Skawina		:	Hash function optimizations
 *	  Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	  Malcolm Beattie	:	Set peercred for socketpair
 *	 Michal Ostrowski	:	Module initialization cleanup.
 *	 Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 * [TO FIX]
 * ECONNREFUSED is not returned from one end of a connected socket to the
 * other the moment one end closes.
 * fstat() doesn't return st_dev=0, nor give the blksize as a high water mark
 * and a fake inode identifier (nor does it have the BSD first-socket-fstat-twice bug).
 * [NOT TO FIX]
 * accept() returns a path name even if the connecting socket has closed
 * in the meantime (BSD loses the path and gives up).
 * accept() returns a 0 length path for an unbound connector. BSD returns 16
 * and a null first byte in the path (but not for getsockname/getpeername - BSD bug ??)
 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
 * BSD af_unix apparently has connect forgetting to block properly.
 * (need to check this against the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  that start with a NUL byte, so that this name space does not
 *		  intersect with BSD names.
 */
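
/*
 * For illustration: a minimal, hedged userspace sketch of the "abstract"
 * binding described above (not part of this file's build; the name "demo"
 * is an example only).  The name passed to bind() starts with a NUL byte,
 * so it never touches the filesystem, and the address length must cover
 * exactly the bytes of the name.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int bind_abstract(int fd)
 *	{
 *		struct sockaddr_un sun;
 *		socklen_t len;
 *
 *		memset(&sun, 0, sizeof(sun));
 *		sun.sun_family = AF_UNIX;
 *		// sun_path[0] stays '\0': abstract namespace
 *		memcpy(sun.sun_path + 1, "demo", 4);
 *		len = offsetof(struct sockaddr_un, sun_path) + 1 + 4;
 *		return bind(fd, (struct sockaddr *)&sun, len);
 *	}
 */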

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/file.h>

#include "scm.h"

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


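/* Unbound sockets live in the second half of unix_socket_table, hashed
 * by a folded form of the sock pointer itself; bound sockets land in the
 * first half (by name hash or inode number), which is what the
 * UNIX_ABSTRACT() test below relies on.
 */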
static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock
 *    each socket state is protected by a separate spin lock.
 */
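/* Fold a 32-bit partial checksum of an address down to one of the
 * UNIX_HASH_SIZE buckets: csum_fold() yields 16 bits, and the extra
 * xor mixes the high byte into the low one before masking.
 */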
static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(const struct sock *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) >
		READ_ONCE(sk->sk_max_ack_backlog);
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check a unix socket name:
 *	- it should not be zero length.
 *	- if it does not start with a NUL byte, it should be NUL terminated
 *	  (an FS object)
 *	- if it starts with a NUL byte, it is an abstract name.
 */
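
/* For illustration (the names are examples only):
 *	sun_path = "/tmp/demo", NUL terminated	-> filesystem object
 *	sun_path = "\0demo", length delimited	-> abstract name
 */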

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	*hashp = 0;

	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */
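
/*
 * For illustration, a hedged userspace sketch of the situation this
 * machinery serves (not part of this file's build; the helper name is
 * hypothetical): a datagram client connect()ed to a busy log-style
 * server uses poll() to wait until the server's receive queue has
 * drained enough for another datagram to be enqueued.
 *
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t send_when_writable(int fd, const void *msg, size_t len)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *		// Writability here depends on the peer's receive queue;
 *		// the relayed wakeup described above is what ends the wait.
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *		return send(fd, msg, len, 0);
 *	}
 */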

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT |
				   EPOLLWRNORM |
				   EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

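/* A socket counts as writable while it is not listening and less than a
 * quarter of sk_sndbuf is consumed by queued write memory
 * (wmem_alloc << 2 <= sndbuf is wmem_alloc <= sndbuf / 4).
 */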
static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What was the above comment talking about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
	spin_unlock(&sk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

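	/* Take both peer locks in address order so that two concurrent
	 * copy_peercred() calls on the same pair cannot deadlock.
	 */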
	if (sk < peersk) {
		spin_lock(&sk->sk_peer_lock);
		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&peersk->sk_peer_lock);
		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	}
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

	spin_unlock(&sk->sk_peer_lock);
	spin_unlock(&peersk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->iolock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->iolock);

	return 0;
}
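
/*
 * For illustration, the userspace view of the hook above (a hedged
 * sketch, not part of this file's build): once SO_PEEK_OFF is set to a
 * non-negative value, successive MSG_PEEK reads advance through the
 * queued data instead of re-reading it from the start.
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks from offset 0
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks past the first chunk
 */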
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct unix_sock *u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) u = unix_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) seq_printf(m, "scm_fds: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) atomic_read(&u->scm_stat.nr_fds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) #define unix_show_fdinfo NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static const struct proto_ops unix_stream_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) .family = PF_UNIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) .release = unix_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) .bind = unix_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) .connect = unix_stream_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) .socketpair = unix_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) .accept = unix_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) .getname = unix_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) .poll = unix_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) .ioctl = unix_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) .compat_ioctl = unix_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) .listen = unix_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) .shutdown = unix_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) .sendmsg = unix_stream_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) .recvmsg = unix_stream_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) .mmap = sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) .sendpage = unix_stream_sendpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) .splice_read = unix_stream_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) .set_peek_off = unix_set_peek_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) .show_fdinfo = unix_show_fdinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) };

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static struct proto unix_proto = {
	.name		= "UNIX",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct unix_sock),
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
	/*
	 * Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
	 * nothing uses it.
	 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
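
/*
 * For illustration only (a userspace sketch, not part of this file): how
 * the checks in unix_create() look from socket(2).  The protocol argument
 * must be 0 (PF_UNIX itself, i.e. 1, is also accepted), and SOCK_RAW is
 * quietly downgraded to SOCK_DGRAM.  Standard libc headers assumed, error
 * handling elided:
 *
 *	int s1 = socket(AF_UNIX, SOCK_STREAM, 0);	// unix_stream_ops
 *	int s2 = socket(AF_UNIX, SOCK_DGRAM, 0);	// unix_dgram_ops
 *	int s3 = socket(AF_UNIX, SOCK_SEQPACKET, 0);	// unix_seqpacket_ops
 *	int s4 = socket(AF_UNIX, SOCK_RAW, 0);		// behaves as SOCK_DGRAM
 *
 *	socket(AF_UNIX, SOCK_STREAM, 2);	// fails: EPROTONOSUPPORT
 *	socket(AF_UNIX, SOCK_RDM, 0);		// fails: ESOCKTNOSUPPORT
 */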

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path + 1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum + 1) & 0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	smp_store_release(&u->addr, addr);
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}
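
/*
 * For illustration only (a userspace sketch, not part of this file):
 * autobind is triggered by binding an address that is just the family,
 * i.e. addr_len == sizeof(sa_family_t).  The kernel then picks a unique
 * five-hex-digit abstract name, visible via getsockname().  Error
 * handling elided:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int s = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	bind(s, (struct sockaddr *)&a, sizeof(sa_family_t));
 *
 *	socklen_t len = sizeof(a);
 *	getsockname(s, (struct sockaddr *)&a, &len);
 *	// a.sun_path[0] == '\0', followed by e.g. "0003a" (abstract name)
 */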

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;

		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;

			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;

	/*
	 * Get the parent directory and calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;
	struct path path = { };

	err = -EINVAL;
	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
	    sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (sun_path[0]) {
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			goto out;
		}
	}

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_put;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	refcount_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	smp_store_release(&u->addr, addr);
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->bindlock);
out_put:
	if (err)
		path_put(&path);
out:
	return err;
}
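
/*
 * For illustration only (a userspace sketch, not part of this file): the
 * two flavours of explicit bind.  A pathname bind creates a socket inode
 * on the filesystem (mode masked by the umask, via unix_mknod() above);
 * rebinding to an existing path fails with EADDRINUSE.  An abstract bind
 * (sun_path[0] == '\0') touches no filesystem state.  The path name is a
 * hypothetical example; error handling elided:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int s = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	// filesystem namespace: creates ./demo.sock
 *	strcpy(a.sun_path, "demo.sock");
 *	bind(s, (struct sockaddr *)&a, sizeof(a));
 *
 *	// abstract namespace: no inode, the name is "\0demo"
 *	// a.sun_path[0] = '\0'; memcpy(a.sun_path + 1, "demo", 4);
 *	// bind(s, (struct sockaddr *)&a,
 *	//      offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */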

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
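
/*
 * A note on the pointer comparison above: it is the usual ABBA-deadlock
 * avoidance.  Two tasks locking the pair (A, B) and (B, A) both normalise
 * to the same address order, so neither can hold one lock while waiting
 * for the other.  Roughly:
 *
 *	task 1: lock(min(A, B)); lock_nested(max(A, B));
 *	task 2: lock(min(B, A)); lock_nested(max(B, A));  // same order
 */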

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 * 1003.1g: breaking the connected state with AF_UNSPEC.
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
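
/*
 * For illustration only (a userspace sketch, not part of this file):
 * connect(2) on a datagram socket just pins a default peer so plain
 * write()/send() work, and a later connect() with AF_UNSPEC drops the
 * association again.  The server path is a hypothetical example and is
 * assumed to exist; error handling elided:
 *
 *	struct sockaddr_un srv = { .sun_family = AF_UNIX };
 *	strcpy(srv.sun_path, "/tmp/srv.sock");
 *	int s = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	connect(s, (struct sockaddr *)&srv, sizeof(srv));
 *	send(s, "ping", 4, 0);			// goes to the pinned peer
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(s, &sa, sizeof(sa));		// disconnect
 */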

static long unix_wait_for_peer(struct sock *other, long timeo)
	__releases(&unix_sk(other)->lock)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all, allocate resources.  If we were to do this after
	 * the state is locked, we would have to recheck everything again
	 * anyway.
	 */

	err = -ENOMEM;

	/* Create a new sock for the complete connection. */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate the skb for sending to the listening sock. */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find the listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch the state of the peer. */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * This is a tricky place.  We need to grab our state lock and cannot
	 * drop the lock on the peer, which is dangerous because a deadlock is
	 * possible.  The connect-to-self case and simultaneous connect
	 * attempts are eliminated by checking the socket state: other is
	 * TCP_LISTEN, and if sk is TCP_LISTEN we checked that before
	 * attempting to grab the lock.
	 *
	 * And we still have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect. */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected. */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open!  Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* Copy address information from the listening to the new sock.
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under unix_table_lock.  Insertion
	 * into the hash chain we'd found it in had been done
	 * in an earlier critical section protected by unix_table_lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path, visible to anyone who gets newu->addr
	 * by smp_load_acquire().  IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}
	refcount_inc(&otheru->addr->refcnt);
	smp_store_release(&newu->addr, otheru->addr);

	/* Set credentials. */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* Take it and send info to the listening sock. */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
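
/*
 * For illustration only (a userspace sketch, not part of this file): the
 * client side of the handshake implemented above.  Note the AF_UNIX quirk
 * visible here: when the listener's backlog is full, a non-blocking
 * connect() fails with EAGAIN rather than the EINPROGRESS a TCP socket
 * would return.  The path is a hypothetical example; error handling
 * elided:
 *
 *	struct sockaddr_un srv = { .sun_family = AF_UNIX };
 *	strcpy(srv.sun_path, "/tmp/srv.sock");
 *	int c = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	if (connect(c, (struct sockaddr *)&srv, sizeof(srv)) == 0)
 *		write(c, "hello", 5);	// the peer is fully wired up here
 */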

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back. */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
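
/*
 * For illustration only (a userspace sketch, not part of this file):
 * socketpair(2) is the entry point for the back-to-back wiring above; no
 * name, bind or connect is involved.  Error handling elided:
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	write(sv[0], "hi", 2);
 *
 *	char buf[2];
 *	read(sv[1], buf, 2);	// receives "hi"
 */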

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
		       bool kern)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If the socket state is TCP_LISTEN it cannot change (for now...),
	 * so no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* Attach the accepted sock to the socket. */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
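
/*
 * For illustration only (a userspace sketch, not part of this file): the
 * matching listener side.  The accepted socket arrives as an skb on the
 * listening queue (see unix_stream_connect() above) and inherits
 * SOCK_PASSCRED/SOCK_PASSSEC from the listener.  Error handling elided:
 *
 *	int l = socket(AF_UNIX, SOCK_STREAM, 0);
 *	// ... bind(l, ...) as in the earlier bind sketch ...
 *	listen(l, 8);
 *
 *	int c = accept(l, NULL, NULL);	// dequeues one queued skb
 */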

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_address *addr;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	addr = smp_load_acquire(&unix_sk(sk)->addr);
	if (!addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		err = sizeof(short);
	} else {
		err = addr->len;
		memcpy(sunaddr, addr->name, addr->len);
	}
	sock_put(sk);
out:
	return err;
}
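
/*
 * For illustration only (a userspace sketch, not part of this file):
 * getsockname(2)/getpeername(2) end up here.  An unbound socket reports
 * only the family (length sizeof(sa_family_t)); a bound one reports the
 * full recorded address.  s is any AF_UNIX socket; error handling elided:
 *
 *	struct sockaddr_un a;
 *	socklen_t len = sizeof(a);
 *
 *	getsockname(s, (struct sockaddr *)&a, &len);
 *	if (len == sizeof(sa_family_t))
 *		;	// the socket was never bound or autobound
 */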

static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = scm_fp_dup(UNIXCB(skb).fp);

	/*
	 * Garbage collection of unix sockets starts by selecting a set of
	 * candidate sockets which have a reference only from being in flight
	 * (total_refs == inflight_refs).  This condition is checked once during
	 * the candidate collection phase, and candidates are marked as such, so
	 * that non-candidates can later be ignored.  While inflight_refs is
	 * protected by unix_gc_lock, total_refs (the file count) is not, hence
	 * this is an instantaneous decision.
	 *
	 * Once a candidate, however, the socket must not be reinstalled into a
	 * file descriptor while the garbage collection is in progress.
	 *
	 * If the above conditions are met, then the directed graph of
	 * candidates (*) does not change while unix_gc_lock is held.
	 *
	 * Any operation that changes the file count through file descriptors
	 * (dup, close, sendmsg) does not change the graph, since candidates are
	 * not installed in fds.
	 *
	 * Dequeueing a candidate via recvmsg would install it into an fd, but
	 * that takes unix_gc_lock to decrement the inflight count, so it's
	 * serialized with garbage collection.
	 *
	 * MSG_PEEK is special in that it does not change the inflight count,
	 * yet does install the socket into an fd.  The following lock/unlock
	 * pair is to ensure serialization with garbage collection.  It must be
	 * done between incrementing the file count and installing the file into
	 * an fd.
	 *
	 * If garbage collection starts after the barrier provided by the
	 * lock/unlock, then it will see the elevated refcount and not mark this
	 * as a candidate.  If a garbage collection is already in progress
	 * before the file count was incremented, then the lock/unlock pair will
	 * ensure that garbage collection is finished before progressing to
	 * installing the fd.
	 *
	 * (*) A -> B, where B is on the queue of A, or B is on the queue of C,
	 * which is on the queue of listening socket A.
	 */
	spin_lock(&unix_gc_lock);
	spin_unlock(&unix_gc_lock);
}
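
/*
 * For illustration only (a userspace sketch, not part of this file): the
 * MSG_PEEK case the comment above worries about.  Peeking at an
 * SCM_RIGHTS message installs duplicates of the passed descriptors
 * without dequeueing the skb, so the same fds are materialised again by
 * the real read.  s is an AF_UNIX socket with a queued fd-passing
 * message; error handling elided:
 *
 *	char buf[1], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *
 *	recvmsg(s, &msg, MSG_PEEK);	// fds installed, skb stays queued
 *	recvmsg(s, &msg, 0);		// fds installed again, skb gone
 */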

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       !other->sk_socket ||
	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * Some apps rely on write() giving SCM_CREDENTIALS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * We include credentials if source or destination socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * asserted SOCK_PASSCRED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) const struct sock *other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (UNIXCB(skb).pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (unix_passcred_enabled(sock, other)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) UNIXCB(skb).pid = get_pid(task_tgid(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
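^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* Userspace view (an illustrative sketch, not part of this file's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * build): a receiver that enables SO_PASSCRED gets SCM_CREDENTIALS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * even when the sender attached no control message, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * maybe_add_creds() above fills in the sender's pid/uid/gid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   int on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   char buf[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   struct iovec iov = { buf, sizeof(buf) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   char cbuf[CMSG_SPACE(sizeof(struct ucred))];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *                        .msg_control = cbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *                        .msg_controllen = sizeof(cbuf) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   recvmsg(fd, &mh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *   if (c && c->cmsg_level == SOL_SOCKET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *       c->cmsg_type == SCM_CREDENTIALS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *           ;  /- CMSG_DATA(c) holds the peer's struct ucred -/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  */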
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static int maybe_init_creds(struct scm_cookie *scm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct socket *socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) const struct sock *other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct msghdr msg = { .msg_controllen = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) err = scm_send(socket, &msg, scm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (unix_passcred_enabled(socket, other)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) scm->pid = get_pid(task_tgid(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) current_uid_gid(&scm->creds.uid, &scm->creds.gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static bool unix_skb_scm_eq(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct scm_cookie *scm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) const struct unix_skb_parms *u = &UNIXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return u->pid == scm->pid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) uid_eq(u->uid, scm->creds.uid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) gid_eq(u->gid, scm->creds.gid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) unix_secdata_eq(scm, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct scm_fp_list *fp = UNIXCB(skb).fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct unix_sock *u = unix_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (unlikely(fp && fp->count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) atomic_add(fp->count, &u->scm_stat.nr_fds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct scm_fp_list *fp = UNIXCB(skb).fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct unix_sock *u = unix_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (unlikely(fp && fp->count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) atomic_sub(fp->count, &u->scm_stat.nr_fds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Send AF_UNIX data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct unix_sock *u = unix_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct sock *other = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) int namelen = 0; /* fake init to silence GCC's maybe-uninitialized warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) unsigned int hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct scm_cookie scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) int sk_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) wait_for_unix_gc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) err = scm_send(sock, msg, &scm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (msg->msg_flags&MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (msg->msg_namelen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) namelen = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) sunaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) other = unix_peer_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (!other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) && (err = unix_autobind(sock)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (len > sk->sk_sndbuf - 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (len > SKB_MAX_ALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) data_len = min_t(size_t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) len - SKB_MAX_ALLOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) MAX_SKB_FRAGS * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) data_len = PAGE_ALIGN(data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
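^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* Worked example of the split above (illustrative; the constants vary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * by config): with PAGE_SIZE == 4096 and len == SKB_MAX_ALLOC + 10000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * data_len = PAGE_ALIGN(min(10000, MAX_SKB_FRAGS * 4096)) == 12288,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * so the skb carries len - 12288 bytes in its linear head and 12288
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * bytes in page frags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  */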
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) msg->msg_flags & MSG_DONTWAIT, &err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) PAGE_ALLOC_COSTLY_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) err = unix_scm_to_skb(&scm, skb, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) skb_put(skb, len - data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) skb->data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) skb->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (!other) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (sunaddr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) hash, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (other == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (sk_filter(other, skb) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* Toss the packet but do not return any error to the sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) err = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) sk_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) unix_state_lock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) restart_locked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (!unix_may_send(sk, other))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (unlikely(sock_flag(other, SOCK_DEAD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * Check with 1003.1g - what error should a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) * datagram sent to a dead peer return?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) sock_put(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (!sk_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) unix_state_lock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (unix_peer(sk) == other) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) unix_peer(sk) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) unix_dgram_peer_wake_disconnect_wakeup(sk, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) unix_dgram_disconnected(sk, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) sock_put(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) err = -ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) other = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (other->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (sk->sk_type != SOCK_SEQPACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) err = security_unix_may_send(sk->sk_socket, other->sk_socket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* other == sk && unix_peer(other) != sk if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * - unix_peer(sk) == NULL, destination address bound to sk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * - unix_peer(sk) == sk by the time of the get, but disconnected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) *   before the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
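^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /* E.g. (an illustrative sketch of the first case) a socket that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  * sends to its own bound address ends up here with other == sk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *   bind(fd, (struct sockaddr *)&own_addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *   sendto(fd, buf, n, 0, (struct sockaddr *)&own_addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  * unix_find_other() returns sk itself, while unix_peer(other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  * stays NULL and is therefore != sk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  */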
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (other != sk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) unlikely(unix_peer(other) != sk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) unix_recvq_full_lockless(other))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (timeo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) timeo = unix_wait_for_peer(other, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) err = sock_intr_errno(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (!sk_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) unix_state_double_lock(sk, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (unix_peer(sk) != other ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) unix_dgram_peer_wake_me(sk, other)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) sk_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (!sk_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) sk_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) goto restart_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (unlikely(sk_locked))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (sock_flag(other, SOCK_RCVTSTAMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) maybe_add_creds(skb, sock, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) scm_stat_add(other, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) skb_queue_tail(&other->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) other->sk_data_ready(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) sock_put(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (sk_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) sock_put(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* We use paged skbs for stream sockets, limiting occupancy to 32768
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * bytes with a minimum of a full page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
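^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /* Illustrative arithmetic: with 4 KiB pages, get_order(32768) == 3 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * the cap is exactly 32768 bytes; with 64 KiB pages, get_order(32768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * == 0 and the cap rounds up to one full 64 KiB page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  */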
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct sock *other = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) int err, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) int sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct scm_cookie scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) bool fds_sent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) int data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) wait_for_unix_gc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) err = scm_send(sock, msg, &scm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (msg->msg_flags&MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (msg->msg_namelen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) other = unix_peer(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (!other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (sk->sk_shutdown & SEND_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) goto pipe_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) while (sent < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) size = len - sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /* Keep two messages in the pipe so it schedules better */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /* allow fallback to order-0 allocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) msg->msg_flags & MSG_DONTWAIT, &err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) get_order(UNIX_SKB_FRAGS_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /* Only send the fds in the first buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) err = unix_scm_to_skb(&scm, skb, !fds_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) fds_sent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) skb_put(skb, size - data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) skb->data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) skb->len = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) unix_state_lock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (sock_flag(other, SOCK_DEAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) (other->sk_shutdown & RCV_SHUTDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) goto pipe_err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) maybe_add_creds(skb, sock, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) scm_stat_add(other, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) skb_queue_tail(&other->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) other->sk_data_ready(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) sent += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) pipe_err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) pipe_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) send_sig(SIGPIPE, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return sent ? : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) int offset, size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) bool send_sigpipe = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) bool init_scm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) struct scm_cookie scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct sock *other, *sk = socket->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) struct sk_buff *skb, *newskb = NULL, *tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (flags & MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) other = unix_peer(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (!other || sk->sk_state != TCP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
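^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* The if (false) block below is entered only via the alloc_skb label:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  * callers jump here to drop both locks before allocating a fresh skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  * then fall through to retake the iolock below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  */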
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (false) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) alloc_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) mutex_unlock(&unix_sk(other)->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) &err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (!newskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /* We must acquire iolock because we modify already-present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * skbs in the sk_receive_queue and mess with skb->len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) err = mutex_lock_interruptible(&unix_sk(other)->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (sk->sk_shutdown & SEND_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) send_sigpipe = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) unix_state_lock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (sock_flag(other, SOCK_DEAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) other->sk_shutdown & RCV_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) send_sigpipe = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) goto err_state_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (init_scm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) err = maybe_init_creds(&scm, socket, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) goto err_state_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) init_scm = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) skb = skb_peek_tail(&other->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (tail && tail == skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) skb = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (newskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) skb = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto alloc_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) } else if (newskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* This is the fast path; we don't strictly need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * free newskb here, though calling kfree_skb() with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * newskb == NULL would do no harm either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) consume_skb(newskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) newskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (skb_append_pagefrags(skb, page, offset, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto alloc_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) skb->len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) skb->data_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) skb->truesize += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) refcount_add(size, &sk->sk_wmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (newskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) err = unix_scm_to_skb(&scm, skb, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) goto err_state_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) spin_lock(&other->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) __skb_queue_tail(&other->sk_receive_queue, newskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) spin_unlock(&other->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) mutex_unlock(&unix_sk(other)->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) other->sk_data_ready(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) err_state_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) unix_state_unlock(other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) mutex_unlock(&unix_sk(other)->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) kfree_skb(newskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (send_sigpipe && !(flags & MSG_NOSIGNAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) send_sig(SIGPIPE, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (!init_scm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
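^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /* Userspace reaches unix_stream_sendpage() via zero-copy writes such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * as sendfile(2) or splice(2) on a connected stream socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * (illustrative sketch, not part of this file's build):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *   int sv[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *   socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *   sendfile(sv[0], file_fd, NULL, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * Page references from file_fd's page cache get appended to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * peer's tail skb rather than being copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  */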
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (sk->sk_state != TCP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (msg->msg_namelen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) msg->msg_namelen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return unix_dgram_sendmsg(sock, msg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (sk->sk_state != TCP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return unix_dgram_recvmsg(sock, msg, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) msg->msg_namelen = addr->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) memcpy(msg->msg_name, addr->name, addr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct scm_cookie scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) struct unix_sock *u = unix_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) struct sk_buff *skb, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) int skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (flags&MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) mutex_lock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) skip = sk_peek_offset(sk, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) &skip, &err, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (!(flags & MSG_PEEK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) scm_stat_del(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) mutex_unlock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) } while (timeo &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) &err, &timeo, last));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (!skb) { /* implies iolock unlocked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) unix_state_lock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) (sk->sk_shutdown & RCV_SHUTDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (wq_has_sleeper(&u->peer_wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) wake_up_interruptible_sync_poll(&u->peer_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) EPOLLOUT | EPOLLWRNORM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) EPOLLWRBAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (msg->msg_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) unix_copy_addr(msg, skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (size > skb->len - skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) size = skb->len - skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) else if (size < skb->len - skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) err = skb_copy_datagram_msg(skb, skip, msg, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (sock_flag(sk, SOCK_RCVTSTAMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) __sock_recv_timestamp(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) memset(&scm, 0, sizeof(scm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) unix_set_secdata(&scm, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!(flags & MSG_PEEK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (UNIXCB(skb).fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) unix_detach_fds(&scm, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) sk_peek_offset_bwd(sk, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) /* It is questionable: on PEEK we could:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) - not return fds - good, but too simple 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) - return fds, and not return them on read (old strategy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) apparently wrong)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) - clone fds (I chose this for now; it is the most universal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) solution)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) POSIX 1003.1g does not actually define this clearly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) at all. Then again, POSIX 1003.1g doesn't define a lot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) of things clearly!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) sk_peek_offset_fwd(sk, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (UNIXCB(skb).fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) unix_peek_fds(&scm, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) err = (flags & MSG_TRUNC) ? skb->len - skip : size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) scm_recv(sock, msg, &scm, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) skb_free_datagram(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) mutex_unlock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
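^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /* Userspace note (illustrative): because the return value above is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  * the full skb->len - skip whenever MSG_TRUNC is passed, the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  * the next datagram can be probed without consuming it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  *   ssize_t full = recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  * full is the datagram's length, and the datagram remains queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  */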
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * Sleep until more data has arrived, but check for races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static long unix_stream_data_wait(struct sock *sk, long timeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct sk_buff *last, unsigned int last_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) bool freezable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct sk_buff *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) unix_state_lock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) tail = skb_peek_tail(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (tail != last ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) (tail && tail->len != last_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) sk->sk_err ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) (sk->sk_shutdown & RCV_SHUTDOWN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) signal_pending(current) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) !timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (freezable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) timeo = freezable_schedule_timeout(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) timeo = schedule_timeout(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) unix_state_lock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (sock_flag(sk, SOCK_DEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) finish_wait(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) return timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static unsigned int unix_skb_len(const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return skb->len - UNIXCB(skb).consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
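^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) /* E.g. (illustrative numbers) an skb with skb->len == 4096 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)  * UNIXCB(skb).consumed == 1024 still holds 3072 unread bytes; stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)  * reads advance 'consumed' and only unlink the skb once it is fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)  * drained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)  */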
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct unix_stream_read_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) int (*recv_actor)(struct sk_buff *, int, int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) struct unix_stream_read_state *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) struct socket *socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct msghdr *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct pipe_inode_info *pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) unsigned int splice_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) };
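^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /* Illustrative wiring (a sketch; the actual recvmsg caller follows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  * this shape): the recv_actor callback decides where each chunk of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  * data goes, which is what lets recvmsg and splice share
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  * unix_stream_read_generic():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *   struct unix_stream_read_state state = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *           .recv_actor = unix_stream_read_actor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *           .socket = sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *           .msg = msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *           .size = size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *           .flags = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *   };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *   return unix_stream_read_generic(&state, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  */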
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) static int unix_stream_read_generic(struct unix_stream_read_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) bool freezable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) struct scm_cookie scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct socket *sock = state->socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct unix_sock *u = unix_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) int copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) int flags = state->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) int noblock = flags & MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) bool check_creds = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) int skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) size_t size = state->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) unsigned int last_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (unlikely(flags & MSG_OOB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) timeo = sock_rcvtimeo(sk, noblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) memset(&scm, 0, sizeof(scm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /* Lock the socket to prevent queue disordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * while we sleep in memcpy_to_msg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) mutex_lock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) skip = max(sk_peek_offset(sk, flags), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) int chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) bool drop_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct sk_buff *skb, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) redo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) unix_state_lock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (sock_flag(sk, SOCK_DEAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) last = skb = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) last_len = last ? last->len : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (copied >= target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) * POSIX 1003.1g mandates this order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (sk->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (!timeo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) mutex_unlock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) timeo = unix_stream_data_wait(sk, timeo, last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) last_len, freezable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) err = sock_intr_errno(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) scm_destroy(&scm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) mutex_lock(&u->iolock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) goto redo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) while (skip >= unix_skb_len(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) skip -= unix_skb_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) last = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) last_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) skb = skb_peek_next(skb, &sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) unix_state_unlock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (check_creds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* Never glue messages from different writers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (!unix_skb_scm_eq(skb, &scm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) /* Copy credentials */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) unix_set_secdata(&scm, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) check_creds = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) /* Copy address just once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (state->msg && state->msg->msg_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) state->msg->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) unix_copy_addr(state->msg, skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) sunaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) chunk = state->recv_actor(skb, skip, chunk, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) drop_skb = !unix_skb_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /* skb is only safe to use if !drop_skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (chunk < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (copied == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) copied = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) copied += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) size -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
		if (drop_skb) {
			/* The skb was fully consumed by a concurrent reader,
			 * so nothing more can be expected from it: it has
			 * certainly been dropped from the socket queue and
			 * must be treated as invalid.
			 *
			 * Report a short read instead.
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (UNIXCB(skb).fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}

static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}

static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}
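
/* Illustrative sketch (not kernel code): how a userspace peer might
 * consume the SCM_RIGHTS ancillary data that unix_stream_read_generic()
 * delivers via scm_recv() above. Identifiers below are ordinary
 * userspace API; the buffer sizes and "sockfd" are example assumptions.
 *
 *	char data[256], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *	int passed_fd = -1;
 *
 *	if (recvmsg(sockfd, &mh, 0) >= 0)
 *		for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_RIGHTS)
 *				memcpy(&passed_fd, CMSG_DATA(cm), sizeof(int));
 */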

static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}

static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if ((sock->file->f_flags & O_NONBLOCK) ||
	    (flags & SPLICE_F_NONBLOCK))
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}
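
/* Illustrative sketch (not kernel code): the path above is what a
 * userspace splice(2) from a connected stream socket into a pipe ends
 * up calling. "sockfd" and "pipefd" are hypothetical descriptors.
 *
 *	ssize_t n = splice(sockfd, NULL, pipefd[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK);
 *
 * A non-NULL socket offset fails with -ESPIPE, and SPLICE_F_NONBLOCK
 * is translated to MSG_DONTWAIT above.
 */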

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
		int peer_mode = 0;

		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
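
/* Illustrative sketch (not kernel code): shutdown(2) on one end of a
 * connected pair mirrors onto the peer as described above. "fds" is a
 * hypothetical socketpair.
 *
 *	int fds[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *	shutdown(fds[0], SHUT_WR);
 *
 * fds[0] now has SEND_SHUTDOWN set and fds[1] RCV_SHUTDOWN, so a
 * read(fds[1], ...) past any queued data returns 0 (EOF).
 */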

long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
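
/* Illustrative sketch (not kernel code): the two helpers above back the
 * SIOCINQ/SIOCOUTQ ioctls handled in unix_ioctl() below. "sockfd" is a
 * hypothetical connected descriptor.
 *
 *	int inq, outq;
 *
 *	ioctl(sockfd, SIOCINQ, &inq);
 *	ioctl(sockfd, SIOCOUTQ, &outq);
 *
 * For stream/seqpacket sockets, inq is the total of unread bytes; for
 * datagram sockets it is the length of the first queued packet. outq
 * reports bytes still charged to the sender's write allocation.
 */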

static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);

	return fd;
}
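
/* Illustrative sketch (not kernel code): SIOCUNIXFILE (dispatched in
 * unix_ioctl() below) hands a CAP_NET_ADMIN caller an O_PATH descriptor
 * for the filesystem object the socket is bound to. "sockfd" is a
 * hypothetical descriptor.
 *
 *	int pathfd = ioctl(sockfd, SIOCUNIXFILE, 0);
 *
 * On success the caller can, e.g., resolve the bound path through
 * /proc/self/fd/<pathfd>, or stat it with AT_EMPTY_PATH.
 */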

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/* We report writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
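	/* Even if this socket has send space, writability also depends on
	 * the peer: if the peer's receive queue is full, register on its
	 * wake-up list (unix_dgram_peer_wake_me) so that poll is re-armed
	 * once the peer drains its queue, and report not-writable for now.
	 */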
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
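
/* Worked example (assuming BITS_PER_LONG == 64 and UNIX_HASH_BITS == 8):
 * BUCKET_SPACE is 64 - 9 - 1 = 54, so a seq_file position of
 * set_bucket_offset(3, 2) == (3UL << 54) | 2 packs hash bucket 3 and
 * the 2nd in-bucket socket into a single loff_t, and
 * get_bucket()/get_offset() recover the two halves.
 */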

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under unix_table_lock here */
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
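
/* For reference, a /proc/net/unix line produced by the routine above
 * looks roughly like (values are made-up examples):
 *
 *	0000000000000000: 00000002 00000000 00010000 0001 01 23456 /run/x.sock
 *
 * i.e. kernel address (masked by %pK), refcount, protocol (always 0),
 * flags (__SO_ACCEPTCON for listeners), type, state, inode, then the
 * bound path, with a leading '@' for abstract addresses.
 */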

static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};
#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner = THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private))) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
MODULE_ALIAS_NETPROTO(PF_UNIX);