Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* XDP sockets monitoring support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright(c) 2019 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Author: Björn Töpel <bjorn.topel@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <net/xdp_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/xdp_diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/sock_diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include "xsk_queue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include "xsk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	struct xdp_diag_info di = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	di.ifindex = xs->dev ? xs->dev->ifindex : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	di.queue_id = xs->queue_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 			     struct sk_buff *nlskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	struct xdp_diag_ring dr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	dr.entries = queue->nentries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	return nla_put(nlskb, nl_type, sizeof(dr), &dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 				  struct sk_buff *nlskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	if (xs->rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	if (!err && xs->tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	struct xsk_buff_pool *pool = xs->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	struct xdp_umem *umem = xs->umem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	struct xdp_diag_umem du = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	if (!umem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	du.id = umem->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	du.size = umem->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	du.num_pages = umem->npgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	du.chunk_size = umem->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	du.headroom = umem->headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	du.queue_id = pool ? pool->queue_id : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	du.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	if (umem->zc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 		du.flags |= XDP_DU_F_ZEROCOPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	du.refs = refcount_read(&umem->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	if (!err && pool && pool->fq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		err = xsk_diag_put_ring(pool->fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 					XDP_DIAG_UMEM_FILL_RING, nlskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	if (!err && pool && pool->cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		err = xsk_diag_put_ring(pool->cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 					XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	struct xdp_diag_stats du = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	du.n_rx_dropped = xs->rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	du.n_rx_full = xs->rx_queue_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			 struct xdp_diag_req *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 			 struct user_namespace *user_ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 			 u32 portid, u32 seq, u32 flags, int sk_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	struct xdp_sock *xs = xdp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	struct xdp_diag_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 			flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	msg = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	memset(msg, 0, sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	msg->xdiag_family = AF_XDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	msg->xdiag_type = sk->sk_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	msg->xdiag_ino = sk_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	sock_diag_save_cookie(sk, msg->xdiag_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	mutex_lock(&xs->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	if ((req->xdiag_show & XDP_SHOW_INFO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	    nla_put_u32(nlskb, XDP_DIAG_UID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 			from_kuid_munged(user_ns, sock_i_uid(sk))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	    xsk_diag_put_rings_cfg(xs, nlskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	    xsk_diag_put_umem(xs, nlskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	if ((req->xdiag_show & XDP_SHOW_STATS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	    xsk_diag_put_stats(xs, nlskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	mutex_unlock(&xs->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	nlmsg_end(nlskb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) out_nlmsg_trim:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	mutex_unlock(&xs->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	nlmsg_cancel(nlskb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	struct net *net = sock_net(nlskb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	int num = 0, s_num = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	mutex_lock(&net->xdp.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	sk_for_each(sk, &net->xdp.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		if (!net_eq(sock_net(sk), net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		if (num++ < s_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		if (xsk_diag_fill(sk, nlskb, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 				  sk_user_ns(NETLINK_CB(cb->skb).sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 				  NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 				  sock_i_ino(sk)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 			num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	mutex_unlock(&net->xdp.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	cb->args[0] = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	return nlskb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	struct netlink_dump_control c = { .dump = xsk_diag_dump };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	int hdrlen = sizeof(struct xdp_diag_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	struct net *net = sock_net(nlskb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	if (nlmsg_len(hdr) < hdrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	if (!(hdr->nlmsg_flags & NLM_F_DUMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static const struct sock_diag_handler xsk_diag_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	.family = AF_XDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	.dump = xsk_diag_handler_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static int __init xsk_diag_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	return sock_diag_register(&xsk_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static void __exit xsk_diag_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	sock_diag_unregister(&xsk_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) module_init(xsk_diag_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) module_exit(xsk_diag_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);