Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5/5B/5 Plus boards. The listing below is the read/write thread of the virtio-trace guest agent (tools/virtio/virtio-trace/trace-agent-rw.c in the kernel tree).

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Read/write thread of a guest agent for virtio-trace
 *
 * Copyright (C) 2012 Hitachi, Ltd.
 * Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
 *            Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>	/* cpu_set_t, CPU_SET, sched_setaffinity() */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "trace-agent.h"

#define READ_WAIT_USEC	100000

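/*
 * Data path of this file (one instance per guest CPU): a read/write thread
 * splices trace records from its CPU's ftrace pipe (in_fd) into an
 * intermediate pipe, then splices them from that pipe out to the host
 * (out_fd, either a virtio-serial port or stdout).  splice(2) moves the
 * data inside the kernel, so the records never pass through user-space
 * buffers.
 */
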
void *rw_thread_info_new(void)
{
	struct rw_thread_info *rw_ti;

	rw_ti = zalloc(sizeof(struct rw_thread_info));
	if (rw_ti == NULL) {
		pr_err("rw_thread_info zalloc error\n");
		exit(EXIT_FAILURE);
	}

	rw_ti->cpu_num = -1;
	rw_ti->in_fd = -1;
	rw_ti->out_fd = -1;
	rw_ti->read_pipe = -1;
	rw_ti->write_pipe = -1;
	rw_ti->pipe_size = PIPE_INIT;

	return rw_ti;
}

void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
				bool stdout_flag, unsigned long pipe_size,
				struct rw_thread_info *rw_ti)
{
	int data_pipe[2];

	rw_ti->cpu_num = cpu;

	/* set read(input) fd */
	rw_ti->in_fd = open(in_path, O_RDONLY);
	if (rw_ti->in_fd == -1) {
		pr_err("Could not open in_fd (CPU:%d)\n", cpu);
		goto error;
	}

	/* set write(output) fd */
	if (!stdout_flag) {
		/* virtio-serial output mode */
		rw_ti->out_fd = open(out_path, O_WRONLY);
		if (rw_ti->out_fd == -1) {
			pr_err("Could not open out_fd (CPU:%d)\n", cpu);
			goto error;
		}
	} else
		/* stdout mode */
		rw_ti->out_fd = STDOUT_FILENO;

	if (pipe2(data_pipe, O_NONBLOCK) < 0) {
		pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
		goto error;
	}

	/*
	 * The default pipe capacity is 64 kB (see fs/pipe.c).  Enlarge the
	 * pipe so that trace data can be moved in bigger chunks.  Note that
	 * F_SETPIPE_SZ rounds the size up to a power of two and, for an
	 * unprivileged process, fails if the request exceeds
	 * /proc/sys/fs/pipe-max-size.
	 */
	if (fcntl(*data_pipe, F_SETPIPE_SZ, pipe_size) < 0) {
		pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
		goto error;
	}

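	/*
	 * Note the naming: data_pipe[1] is the pipe's write end, which the
	 * splice-read step below fills ("read_pipe"), and data_pipe[0] is
	 * the pipe's read end, which the splice-write step drains
	 * ("write_pipe").
	 */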
	rw_ti->read_pipe = data_pipe[1];
	rw_ti->write_pipe = data_pipe[0];
	rw_ti->pipe_size = pipe_size;

	return NULL;

error:
	exit(EXIT_FAILURE);
}

/* Bind a thread to a cpu */
static void bind_cpu(int cpu_num)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu_num, &mask);

	/* Passing 0 as the pid binds the calling thread to cpu_num */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
		pr_err("Could not set CPU#%d affinity\n", (int)cpu_num);
}

static void *rw_thread_main(void *thread_info)
{
	ssize_t rlen, wlen;
	ssize_t ret;
	struct rw_thread_info *ts = (struct rw_thread_info *)thread_info;

	bind_cpu(ts->cpu_num);

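	/*
	 * Main loop: block until another thread signals (cond_wakeup) that
	 * the host has ordered reading (global_run_operation), then keep
	 * splicing trace data from the per-CPU ftrace pipe into the local
	 * pipe and from there to the host, until a termination signal is
	 * seen (global_sig_receive).
	 */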
	while (1) {
		/* Wait until the host OS orders a read of trace data */
		if (!global_run_operation) {
			pthread_mutex_lock(&mutex_notify);
			pthread_cond_wait(&cond_wakeup, &mutex_notify);
			pthread_mutex_unlock(&mutex_notify);
		}

		if (global_sig_receive)
			break;

		/*
		 * Each thread reads the trace_pipe_raw of the CPU it is
		 * bound to, so there is no contention between the threads.
		 */
		rlen = splice(ts->in_fd, NULL, ts->read_pipe, NULL,
				ts->pipe_size, SPLICE_F_MOVE | SPLICE_F_MORE);

		if (rlen < 0) {
			pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num);
			goto error;
		} else if (rlen == 0) {
			/*
			 * splice() returns 0 when no trace data are
			 * available, or when the available data are still
			 * smaller than a page and thus cannot be read yet.
			 * Wait for the ring buffer to fill up and retry.
			 */
			usleep(READ_WAIT_USEC);
			pr_debug("Read retry(cpu:%d)\n", ts->cpu_num);
			continue;
		}

		wlen = 0;

		do {
			ret = splice(ts->write_pipe, NULL, ts->out_fd, NULL,
					rlen - wlen,
					SPLICE_F_MOVE | SPLICE_F_MORE);

			if (ret < 0) {
				pr_err("Splice_write in rw-thread(%d)\n",
								ts->cpu_num);
				goto error;
			} else if (ret == 0)
				/*
				 * If the host-side reader cannot keep up,
				 * the guest is stalled: the character device
				 * in QEMU does not support non-blocking
				 * mode, so the writer may have to sleep
				 * here.  This sleep can be removed once
				 * non-blocking mode is supported.
				 */
				sleep(1);
			wlen += ret;
		} while (wlen < rlen);
	}

	return NULL;

error:
	exit(EXIT_FAILURE);
}

pthread_t rw_thread_run(struct rw_thread_info *rw_ti)
{
	int ret;
	pthread_t rw_thread_per_cpu;

	ret = pthread_create(&rw_thread_per_cpu, NULL, rw_thread_main, rw_ti);
	if (ret != 0) {
		pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num);
		exit(EXIT_FAILURE);
	}

	return rw_thread_per_cpu;
}
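
For context, here is a minimal sketch of how the three entry points above (rw_thread_info_new(), rw_thread_init(), rw_thread_run()) could be wired together, one read/write thread per guest CPU. This is not the agent's actual start-up code, which lives elsewhere in the agent and is not shown here; the CPU count, the pipe size, and the ftrace/virtio-serial paths below are assumptions chosen for illustration, and the struct and prototypes are assumed to come from trace-agent.h.

/*
 * Hypothetical caller sketch -- not part of trace-agent-rw.c.  The paths,
 * CPU count and pipe size are assumptions made for illustration only.
 */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include "trace-agent.h"

#define EXAMPLE_NR_CPUS		4		/* assumed number of guest CPUs */
#define EXAMPLE_PIPE_SIZE	(1024 * 1024)	/* assumed pipe size: 1 MiB */

int example_run_agent(void)
{
	struct rw_thread_info *rw_ti[EXAMPLE_NR_CPUS];
	pthread_t threads[EXAMPLE_NR_CPUS];
	char in_path[PATH_MAX], out_path[PATH_MAX];
	int cpu;

	for (cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++) {
		/* Allocate and pre-initialize the per-CPU bookkeeping. */
		rw_ti[cpu] = rw_thread_info_new();

		/* Assumed paths: per-CPU ftrace raw pipe in, virtio-serial port out. */
		snprintf(in_path, sizeof(in_path),
			"/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw",
			cpu);
		snprintf(out_path, sizeof(out_path),
			"/dev/virtio-ports/trace-path-cpu%d", cpu);

		/* Open the fds, create and resize the intermediate pipe... */
		rw_thread_init(cpu, in_path, out_path, false,
				EXAMPLE_PIPE_SIZE, rw_ti[cpu]);
		/* ...and start the splice loop of rw_thread_main(). */
		threads[cpu] = rw_thread_run(rw_ti[cpu]);
	}

	/*
	 * The threads block on cond_wakeup until global_run_operation is set
	 * and they are woken up; they only return after global_sig_receive
	 * is set, so this join effectively waits for agent shutdown.
	 */
	for (cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++)
		pthread_join(threads[cpu], NULL);

	return 0;
}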