/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_TCP_H
#define _RDS_TCP_H

#define RDS_TCP_PORT	16385

struct rds_tcp_incoming {
	struct rds_incoming	ti_inc;
	struct sk_buff_head	ti_skb_list;
};
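
/*
 * Illustrative sketch (not part of this header): the receive path in
 * tcp_recv.c presumably assembles one rds_tcp_incoming per in-flight RDS
 * message, queueing cloned skbs for the payload and later draining them
 * toward userspace, roughly:
 *
 *	skb_queue_tail(&tinc->ti_skb_list, clone);
 *	...
 *	rds_tcp_inc_copy_to_user(&tinc->ti_inc, to);
 *
 * The variable names and exact flow above are assumptions; see tcp_recv.c
 * for the real implementation.
 */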

struct rds_tcp_connection {

	struct list_head	t_tcp_node;
	bool			t_tcp_node_detached;
	struct rds_conn_path	*t_cpath;
	/* t_conn_path_lock synchronizes the connection establishment between
	 * rds_tcp_accept_one and rds_tcp_conn_path_connect
	 */
	struct mutex		t_conn_path_lock;
	struct socket		*t_sock;
	/* original socket callbacks, saved by rds_tcp_set_callbacks() and
	 * put back by rds_tcp_restore_callbacks()
	 */
	void			*t_orig_write_space;
	void			*t_orig_data_ready;
	void			*t_orig_state_change;

	/* inbound message currently being reassembled, plus the header and
	 * data bytes still expected for it
	 */
	struct rds_tcp_incoming	*t_tinc;
	size_t			t_tinc_hdr_rem;
	size_t			t_tinc_data_rem;

	/* XXX error report? */
	struct work_struct	t_conn_w;
	struct work_struct	t_send_w;
	struct work_struct	t_down_w;
	struct work_struct	t_recv_w;

	/* for info exporting only */
	struct list_head	t_list_item;
	u32			t_last_sent_nxt;
	u32			t_last_expected_una;
	u32			t_last_seen_una;
};
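
/*
 * Illustrative sketch (an assumption, not the literal tcp.c code): the
 * t_orig_* fields above let rds_tcp_set_callbacks() park the socket's stock
 * handlers before pointing them at the RDS ones, and let
 * rds_tcp_restore_callbacks() undo that on teardown, roughly:
 *
 *	tc->t_orig_data_ready = sock->sk->sk_data_ready;
 *	sock->sk->sk_data_ready = rds_tcp_data_ready;
 *	...
 *	sock->sk->sk_data_ready = tc->t_orig_data_ready;
 */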

struct rds_tcp_statistics {
	uint64_t	s_tcp_data_ready_calls;
	uint64_t	s_tcp_write_space_calls;
	uint64_t	s_tcp_sndbuf_full;
	uint64_t	s_tcp_connect_raced;
	uint64_t	s_tcp_listen_closed_stale;
};

/* tcp.c */
void rds_tcp_tune(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc);
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
void rds_tcp_accept_work(struct sock *sk);
int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
			__u32 scope_id);
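
/*
 * Example (a sketch only, not taken from the RDS sources): a caller that
 * wants to know whether a v4-mapped local address is usable for an RDS/TCP
 * endpoint might probe it as below; the return-value convention (0 when the
 * address is local) is an assumption here.
 *
 *	struct in6_addr laddr;
 *
 *	ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &laddr);
 *	if (rds_tcp_laddr_check(net, &laddr, 0))
 *		return -EADDRNOTAVAIL;
 */
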
/* tcp_connect.c */
int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
void rds_tcp_state_change(struct sock *sk);

/* tcp_listen.c */
struct socket *rds_tcp_listen_init(struct net *net, bool isv6);
void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
void rds_tcp_keepalive(struct socket *sock);
void *rds_tcp_listen_sock_def_readable(struct net *net);
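
/*
 * Illustrative pairing (a sketch under assumptions, not the per-net init
 * code itself): a listener is typically brought up once per namespace and
 * torn down together with the work item that runs rds_tcp_accept_one();
 * "accept_w" below is a hypothetical name for that acceptor work.
 *
 *	lsock = rds_tcp_listen_init(net, true);
 *	if (!lsock)
 *		goto fail;
 *	...
 *	rds_tcp_listen_stop(lsock, &accept_w);
 */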

/* tcp_recv.c */
int rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk);
int rds_tcp_recv_path(struct rds_conn_path *cp);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);

/* tcp_send.c */
void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp);
void rds_tcp_xmit_path_complete(struct rds_conn_path *cp);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
		 unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
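
/*
 * Rough calling pattern (an assumption about how the generic RDS send path
 * drives these hooks, not code copied from rds/send.c): prepare is invoked
 * once before a batch of transmissions on a path and complete once after,
 * with rds_tcp_xmit() pushing one message fragment at a time in between.
 *
 *	rds_tcp_xmit_path_prepare(cp);
 *	ret = rds_tcp_xmit(conn, rm, hdr_off, sg, off);
 *	...
 *	rds_tcp_xmit_path_complete(cp);
 */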

/* tcp_stats.c */
DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
#define rds_tcp_stats_inc(member) rds_stats_inc_which(rds_tcp_stats, member)
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
				     unsigned int avail);
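
/*
 * Usage sketch (assumed, following the member names in
 * struct rds_tcp_statistics): a handler bumps its per-CPU counter by member
 * name, e.g. from a data_ready callback:
 *
 *	rds_tcp_stats_inc(s_tcp_data_ready_calls);
 */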

#endif