Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #undef TRACE_SYSTEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define TRACE_SYSTEM io_uring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #define _TRACE_IO_URING_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/tracepoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) struct io_wq_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * io_uring_create - called after a new io_uring context was prepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * @fd:			corresponding file descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * @ctx:		pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * @sq_entries:	actual SQ size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * @cq_entries:	actual CQ size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * @flags:		SQ ring flags, provided to io_uring_setup(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * Allows to trace io_uring creation and provides a pointer to the context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * which can be used later to find correlated events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) TRACE_EVENT(io_uring_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 		__field(  int,		fd			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 		__field(  void *,	ctx			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 		__field(  u32,		sq_entries	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 		__field(  u32,		cq_entries	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 		__field(  u32,		flags		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		__entry->fd			= fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		__entry->sq_entries	= sq_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		__entry->cq_entries	= cq_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		__entry->flags		= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 			  __entry->ctx, __entry->fd, __entry->sq_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 			  __entry->cq_entries, __entry->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * io_uring_register - called after a buffer/file/eventfd was successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  * 					   registered for a ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * @ctx:			pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * @opcode:			describes which operation to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * @nr_files:		number of registered files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * @nr_bufs:		number of registered buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * @eventfd:		whether an eventfd is registered or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  * @ret:			return code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * Allows to trace fixed files/buffers/eventfds, that could be registered to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * avoid an overhead of getting references to them for every operation. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * event, together with io_uring_file_get, can provide a full picture of how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * much overhead one can reduce via fixing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) TRACE_EVENT(io_uring_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 			 unsigned nr_bufs, bool eventfd, long ret),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		__field(  void *,	ctx			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		__field(  unsigned,	opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		__field(  unsigned,	nr_files	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		__field(  unsigned,	nr_bufs		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		__field(  bool,		eventfd		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		__field(  long,		ret			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		__entry->nr_files	= nr_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		__entry->nr_bufs	= nr_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		__entry->eventfd	= eventfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		__entry->ret		= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			  "eventfd %d, ret %ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 			  __entry->ctx, __entry->opcode, __entry->nr_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 			  __entry->nr_bufs, __entry->eventfd, __entry->ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * io_uring_file_get - called before getting references to an SQE file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)  * @ctx:	pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  * @fd:		SQE file descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  * Allows to trace how often an SQE file reference is obtained, which can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  * help figuring out if it makes sense to use fixed files, or check that fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  * files are used correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) TRACE_EVENT(io_uring_file_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	TP_PROTO(void *ctx, int fd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	TP_ARGS(ctx, fd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		__field(  void *,	ctx	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		__field(  int,		fd	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		__entry->ctx	= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		__entry->fd		= fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)  * io_uring_queue_async_work - called before submitting a new async work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  * @ctx:	pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  * @rw:	type of workqueue, hashed (non-zero) or normal (zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * @req:	pointer to a submitted request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  * @work:	pointer to a submitted io_wq_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  * Allows to trace asynchronous work submission; @flags carries request flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) TRACE_EVENT(io_uring_queue_async_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 			 unsigned int flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	TP_ARGS(ctx, rw, req, work, flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		__field(  void *,				ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		__field(  int,					rw		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		__field(  void *,				req		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		__field(  struct io_wq_work *,		work	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		__field(  unsigned int,			flags	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		__entry->ctx	= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		__entry->rw		= rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		__entry->req	= req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		__entry->work	= work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 		__entry->flags	= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 			  __entry->ctx, __entry->req, __entry->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			  __entry->rw ? "hashed" : "normal", __entry->work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * io_uring_defer - called when an io_uring request is deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  * @ctx:	pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  * @req:	pointer to a deferred request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  * @user_data:	user data associated with the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * Allows to track deferred requests, to get an insight into which requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  * are not started immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) TRACE_EVENT(io_uring_defer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	TP_PROTO(void *ctx, void *req, unsigned long long user_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	TP_ARGS(ctx, req, user_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		__field(  void *,	req		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 		__field(  unsigned long long, data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		__entry->ctx	= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		__entry->req	= req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		__entry->data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			__entry->req, __entry->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * io_uring_link - called before the io_uring request is added into link_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * 				   of another request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * @ctx:			pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  * @req:			pointer to a linked request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  * @target_req:		pointer to a previous request, that would contain @req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  * Allows to track linked requests, to understand dependencies between requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  * and how it influences their execution flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) TRACE_EVENT(io_uring_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	TP_PROTO(void *ctx, void *req, void *target_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	TP_ARGS(ctx, req, target_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		__field(  void *,	ctx			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		__field(  void *,	req			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		__field(  void *,	target_req	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		__entry->req		= req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		__entry->target_req	= target_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	TP_printk("ring %p, request %p linked after %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 			  __entry->ctx, __entry->req, __entry->target_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  * io_uring_cqring_wait - called before start waiting for an available CQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  * @ctx:		pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)  * @min_events:	minimum number of events to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  * Allows to track waiting for CQE, so that we can e.g. troubleshoot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)  * situations when an application wants to wait for an event that never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)  * comes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) TRACE_EVENT(io_uring_cqring_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	TP_PROTO(void *ctx, int min_events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	TP_ARGS(ctx, min_events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		__field(  void *,	ctx			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 		__field(  int,		min_events	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		__entry->ctx	= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		__entry->min_events	= min_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  * io_uring_fail_link - called before failing a linked request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)  * @req:	request whose links were cancelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)  * @link:	cancelled link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)  * Allows to track linked requests cancellation, to see not only that some work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)  * was cancelled, but also which request was the reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) TRACE_EVENT(io_uring_fail_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	TP_PROTO(void *req, void *link),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	TP_ARGS(req, link),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 		__field(  void *,	req		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		__field(  void *,	link	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		__entry->req	= req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 		__entry->link	= link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	TP_printk("request %p, link %p", __entry->req, __entry->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)  * io_uring_complete - called when completing an SQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  * @ctx:		pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  * @user_data:		user data associated with the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  * @res:		result of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)  * Allows to track completions and the result reported for each request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) TRACE_EVENT(io_uring_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	TP_PROTO(void *ctx, u64 user_data, long res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	TP_ARGS(ctx, user_data, res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		__field(  long,		res		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 		__entry->res		= res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	TP_printk("ring %p, user_data 0x%llx, result %ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 			  __entry->ctx, (unsigned long long)__entry->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 			  __entry->res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  * io_uring_submit_sqe - called before submitting one SQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  * @ctx:		pointer to a ring context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * @opcode:		opcode of request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * @user_data:		user data associated with the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  * @force_nonblock:	whether the submission was forced not to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)  * @sq_thread:		true if sq_thread has submitted this SQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)  * Allows to track SQE submitting, to understand what was the source of it, SQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)  * thread or io_uring_enter call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) TRACE_EVENT(io_uring_submit_sqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		 bool sq_thread),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		__field(  u8,		opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		__field(  bool,		force_nonblock	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		__field(  bool,		sq_thread	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 		__entry->force_nonblock	= force_nonblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		__entry->sq_thread	= sq_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 			  __entry->ctx, __entry->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 			  (unsigned long long) __entry->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 			  __entry->force_nonblock, __entry->sq_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
/**
 * io_uring_poll_arm - called after arming poll for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	poll event mask
 * @events:	poll events of interest
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) TRACE_EVENT(io_uring_poll_arm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	TP_ARGS(ctx, opcode, user_data, mask, events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		__field(  u8,		opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		__field(  int,		mask		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		__field(  int,		events		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 		__entry->mask		= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 		__entry->events		= events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 			  __entry->ctx, __entry->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 			  (unsigned long long) __entry->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 			  __entry->mask, __entry->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
/**
 * io_uring_poll_wake - called on a poll wakeup for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	poll event mask
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) TRACE_EVENT(io_uring_poll_wake,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	TP_ARGS(ctx, opcode, user_data, mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 		__field(  u8,		opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		__field(  int,		mask		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 		__entry->mask		= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 			  __entry->ctx, __entry->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 			  (unsigned long long) __entry->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 			  __entry->mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 
/**
 * io_uring_task_add - called after adding task work for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	poll event mask
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) TRACE_EVENT(io_uring_task_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	TP_ARGS(ctx, opcode, user_data, mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		__field(  u8,		opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 		__field(  int,		mask		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 		__entry->mask		= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	TP_printk("ring %p, op %d, data 0x%llx, mask %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 			  __entry->ctx, __entry->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 			  (unsigned long long) __entry->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 			  __entry->mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
/**
 * io_uring_task_run - called when previously added task work is run
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) TRACE_EVENT(io_uring_task_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	TP_PROTO(void *ctx, u8 opcode, u64 user_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	TP_ARGS(ctx, opcode, user_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	TP_STRUCT__entry (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		__field(  void *,	ctx		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		__field(  u8,		opcode		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		__field(  u64,		user_data	)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	TP_fast_assign(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		__entry->ctx		= ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		__entry->opcode		= opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		__entry->user_data	= user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	TP_printk("ring %p, op %d, data 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 			  __entry->ctx, __entry->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 			  (unsigned long long) __entry->user_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) #endif /* _TRACE_IO_URING_H */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /* This part must be outside protection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) #include <trace/define_trace.h>