Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5+ boards. The file below is tools/virtio/ringtest/ring.c, a userspace benchmark of a simple descriptor-based virtio ring layout.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
 * signalling, unconditionally.
 */
#define _GNU_SOURCE
#include "main.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Next - Where the next entry will be written.
 * Prev - "Next" value when the event triggered previously.
 * Event - Peer requested an event after writing this entry.
 */
static inline bool need_event(unsigned short event,
			      unsigned short next,
			      unsigned short prev)
{
	return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}
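
/* In other words: need_event() is true iff the requested index "event"
 * falls inside the half-open window [prev, next) of entries written since
 * the last notification; the unsigned short arithmetic keeps the
 * comparison correct across 16-bit index wraparound.
 *
 * Worked example: prev = 10, next = 13 (entries 10..12 were written).
 *   event = 11: (13 - 11 - 1) = 1  < (13 - 10) = 3 -> notify.
 *   event = 14: (13 - 14 - 1) = 0xfffe, not < 3    -> no notification.
 */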

/* Design:
 * Guest adds descriptors with unique index values and DESC_HW in flags.
 * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
 * Flags are always set last.
 */
#define DESC_HW 0x1

struct desc {
	unsigned short flags;
	unsigned short index;
	unsigned len;
	unsigned long long addr;
};
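
/* With natural alignment, and on the common ABIs where unsigned is 32-bit,
 * this struct is 16 bytes (2 + 2 + 4 + 8), so a typical 64-byte cache line
 * holds four descriptors.
 */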

/* how much padding is needed to avoid false sharing of a cache line */
#define HOST_GUEST_PADDING 0x80

/* Mostly read */
struct event {
	unsigned short kick_index;
	unsigned char reserved0[HOST_GUEST_PADDING - 2];
	unsigned short call_index;
	unsigned char reserved1[HOST_GUEST_PADDING - 2];
};
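
/* kick_index is written by the host and read by the guest; call_index is
 * the reverse. Padding each to its own 0x80-byte slot keeps the two on
 * separate cache lines even on CPUs with 128-byte lines.
 */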

struct data {
	void *buf; /* descriptor is writable, we can't get buf from there */
	void *data;
} *data;

struct desc *ring;
struct event *event;

struct guest {
	unsigned avail_idx;
	unsigned last_used_idx;
	unsigned num_free;
	unsigned kicked_avail_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 12];
} guest;

struct host {
	/* we do not need to track the last avail index
	 * unless we have more than one in flight.
	 */
	unsigned used_idx;
	unsigned called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;
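
/* Note the reserved[] sizes leave each struct slightly larger than
 * HOST_GUEST_PADDING (e.g. 4 * 4 + 116 = 132 bytes for struct guest);
 * that is harmless, since the padding only has to keep guest and host
 * state off each other's cache lines, not hit an exact size.
 */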

/* implemented by ring */
void alloc_ring(void)
{
	int ret;
	int i;

	ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
	if (ret) {
		errno = ret; /* posix_memalign reports errors via its return value */
		perror("Unable to allocate ring buffer");
		exit(3);
	}
	event = calloc(1, sizeof(*event));
	if (!event) {
		perror("Unable to allocate event buffer");
		exit(3);
	}
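	/* kicked_avail_idx and called_used_idx start at -1 (0xffff after the
	 * unsigned short truncation in need_event), so the very first
	 * kick_available()/call_used() is never suppressed: the window
	 * [prev, next) then covers almost the whole 16-bit index space.
	 */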
	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	host.used_idx = 0;
	host.called_used_idx = -1;
	for (i = 0; i < ring_size; ++i) {
		struct desc desc = {
			.index = i,
		};
		ring[i] = desc;
	}
	guest.num_free = ring_size;
	data = calloc(ring_size, sizeof(*data));
	if (!data) {
		perror("Unable to allocate data buffer");
		exit(3);
	}
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head, index;

	if (!guest.num_free)
		return -1;

	guest.num_free--;
	head = (ring_size - 1) & (guest.avail_idx++);

	/* Start with a write. On MESI architectures this helps avoid a
	 * shared state with a consumer that is polling this descriptor.
	 */
	ring[head].addr = (unsigned long)buf;
	ring[head].len = len;
	/* The read below might bypass the write above. That is OK because
	 * it's just an optimization. If it happens, we will get the cache
	 * line in a shared state, which is unfortunate, but probably not
	 * worth adding an explicit full barrier to avoid.
	 */
	barrier();
	index = ring[head].index;
	data[index].buf = buf;
	data[index].data = datap;
	/* Barrier A (for pairing) */
	smp_release();
	ring[head].flags = DESC_HW;

	return 0;
}
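
/* A minimal sketch of the producer loop, assuming the main.c harness of
 * this directory supplies kick(), the buffers, and a power-of-two
 * ring_size (start_work() and buf below are hypothetical names):
 *
 *	while (start_work()) {
 *		if (add_inbuf(0x80, buf, buf) < 0)
 *			break;		// ring full: wait for get_buf()
 *	}
 *	kick_available();		// notify the host if it asked for it
 */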

void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;
	unsigned index;
	void *datap;

	if (ring[head].flags & DESC_HW)
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
	*lenp = ring[head].len;
	index = ring[head].index & (ring_size - 1);
	datap = data[index].data;
	*bufp = data[index].buf;
	data[index].buf = NULL;
	data[index].data = NULL;
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}
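
/* Note the mask on ring[head].index above: the descriptor is writable by
 * the host, so the index is masked into [0, ring_size) before it is used
 * to address the guest-private data[] array.
 */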

bool used_empty(void)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;

	return (ring[head].flags & DESC_HW);
}

void disable_call(void)
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_call(void)
{
	event->call_index = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
	return used_empty();
}
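
/* enable_call() follows the usual re-check pattern for event indexes:
 * publish the index we want to be notified at, issue a full barrier, then
 * re-test used_empty(). Without the re-check, a completion that lands
 * between the index write and the host reading it could be missed and the
 * guest would sleep forever. A true return tells the caller it is safe to
 * wait; false means completions already arrived.
 */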

void kick_available(void)
{
	bool need;

	/* Flush out the DESC_HW flag writes before reading kick_index */
	/* Barrier C (for pairing) */
	smp_mb();
	need = need_event(event->kick_index,
			   guest.avail_idx,
			   guest.kicked_avail_idx);

	guest.kicked_avail_idx = guest.avail_idx;
	if (need)
		kick();
}
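
/* The guest only kicks when the host asked for it: kick() fires iff
 * kick_index (the host's "wake me at this avail index") lies in the
 * window [kicked_avail_idx, avail_idx) of entries added since the last
 * kick. For example, with kicked_avail_idx = 4, avail_idx = 8 and
 * kick_index = 5, need_event() sees (8 - 5 - 1) = 2 < (8 - 4) = 4 and
 * the host is notified.
 */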

/* host side */
void disable_kick(void)
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_kick(void)
{
	event->kick_index = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
	return avail_empty();
}

bool avail_empty(void)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	return !(ring[head].flags & DESC_HW);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	if (!(ring[head].flags & DESC_HW))
		return false;

	/* make sure the length read below is not speculated */
	/* Barrier A (for pairing) */
	smp_acquire();

	/* simple in-order completion: we don't need
	 * to touch the index at all. This also means we
	 * can just modify the descriptor in place.
	 */
	ring[head].len--;
	/* Make sure len is valid before flags.
	 * Note: an alternative is to write len and flags in one access -
	 * possible on 64-bit architectures, but wmb is free on Intel anyway,
	 * so I have no way to test whether it's a gain.
	 */
	/* Barrier B (for pairing) */
	smp_release();
	ring[head].flags = 0;
	host.used_idx++;
	return true;
}
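
/* A minimal sketch of the host-side poll loop (poll_used below is a
 * hypothetical driver; the real loop lives in the main.c harness):
 *
 *	void poll_used(void)
 *	{
 *		unsigned len;
 *		void *buf;
 *
 *		while (use_buf(&len, &buf))	// consume in order
 *			;
 *		call_used();			// interrupt the guest if needed
 *	}
 *
 * Note use_buf() never writes through lenp/bufp in this layout:
 * "processing" is just the len decrement, which is enough for the
 * benchmark.
 */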

void call_used(void)
{
	bool need;

	/* Flush out the flags write before reading call_index */
	/* Barrier D (for pairing) */
	smp_mb();

	need = need_event(event->call_index,
			host.used_idx,
			host.called_used_idx);

	host.called_used_idx = host.used_idx;

	if (need)
		call();
}
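
/* Barrier pairing summary:
 *   A: smp_release() in add_inbuf() (guest publishes DESC_HW last) pairs
 *      with smp_acquire() in use_buf() (host reads len/addr after it).
 *   B: smp_release() in use_buf() (host clears DESC_HW last) pairs with
 *      smp_acquire() in get_buf() (guest reads len after it).
 *   C: smp_mb() in kick_available() pairs with smp_mb() in enable_kick(),
 *      ordering flag writes against kick_index accesses.
 *   D: smp_mb() in call_used() pairs with smp_mb() in enable_call(),
 *      ordering flag writes against call_index accesses.
 */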