Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5 Plus boards.

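kernel/events/internal.h:
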
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};
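
/*
 * data_pages[] is a flexible array member: the page pointer array is
 * allocated in one block together with the struct itself. A minimal
 * sketch of the sizing arithmetic (the real allocation is rb_alloc()
 * in kernel/events/ring_buffer.c):
 *
 *	size = offsetof(struct perf_buffer, data_pages[nr_pages]);
 *	rb = kzalloc(size, GFP_KERNEL);
 *
 * user_page is the control page shared with userspace via mmap(); the
 * data pages follow it in the mapping.
 */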

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
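
/*
 * Note that a buffer with no data pages (an AUX-only mapping) can
 * never be unpaused: an unpause request still leaves rb->paused set
 * when rb->nr_pages is 0.
 */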

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
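
/*
 * Worked example, assuming 4 KiB pages: with the default page-by-page
 * allocation page_order() is 0, so nr_pages == 8 gives 8 << 12 ==
 * 32 KiB. In the CONFIG_PERF_USE_VMALLOC layout the data is a single
 * vmalloc area: nr_pages collapses to 1 and page_order holds log2 of
 * the requested page count, so the computed size comes out the same.
 */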

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
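
/*
 * The copy step follows the copy_from_user() convention: memcpy_func
 * returns the number of bytes it did NOT copy, so "size - written"
 * recovers the bytes actually written this round. Wrapping to the
 * next data page with "& (rb->nr_pages - 1)" assumes nr_pages is a
 * power of two (it is 1 in the single-area vmalloc layout, so the
 * mask pins handle->page to 0 there).
 */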

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
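
/*
 * For illustration, DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
 * below expands to roughly:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{ ... the loop above, copying via memcpy_common() ... }
 *
 * Each generated helper returns the number of bytes left uncopied,
 * i.e. 0 on complete success.
 */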

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}
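
/*
 * __output_custom() feeds copy_func a running offset (orig_len - len)
 * instead of an advancing source pointer, hence advance_buf == false;
 * this suits callers such as the bpf output path, whose copy callback
 * indexes into its own source context.
 */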

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
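
/*
 * memcpy_skip() copies nothing yet reports full success, so
 * __output_skip() simply advances the handle by len bytes; this is
 * what perf_output_skip() uses to step over record space the caller
 * does not want to fill byte by byte.
 */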

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif
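
/*
 * With page faults disabled the copy can never sleep, which keeps this
 * fallback safe on the NMI/IRQ output paths; like the helpers above it
 * returns the number of bytes left uncopied. An architecture with a
 * better primitive can pre-define arch_perf_out_copy_user to use it
 * instead.
 */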

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

static inline int get_recursion_context(int *recursion)
{
	unsigned int pc = preempt_count();
	unsigned char rctx = 0;

	rctx += !!(pc & (NMI_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
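
/*
 * The three tests grade preempt_count() into one of four recursion
 * levels:
 *
 *	rctx 0 - task context
 *	rctx 1 - softirq
 *	rctx 2 - hardirq
 *	rctx 3 - NMI
 *
 * An NMI passes all three mask tests, a hardirq the last two, and so
 * on, so events nesting from different contexts land in distinct
 * recursion[] slots instead of colliding.
 */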

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
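
/*
 * Typical pairing, as in perf_swevent_get_recursion_context() (a
 * sketch; the real callers pass a per-CPU recursion array):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// recursing here: drop the event
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 *
 * The barrier() calls stop the compiler from hoisting work out of the
 * guarded window.
 */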

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */