/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CMPXCHG_XCHG_H
#define __ASM_SH_CMPXCHG_XCHG_H

/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 */
#include <linux/bits.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

/*
 * Portable implementations of 1- and 2-byte xchg using a 4-byte cmpxchg.
 * Note: this header isn't self-contained; __cmpxchg_u32 must be defined
 * before it is included.
 */
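/*
 * Worked example (derived from the code below): exchanging the byte at
 * offset 1 within its aligned word uses bitoff = 8 and
 * bitmask = 0x0000ff00 on a little-endian CPU, and
 * bitoff = (4 - 1 - 1) * 8 = 16 and bitmask = 0x00ff0000 on a
 * big-endian one, so the mask always covers the addressed byte.
 */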
static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
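	/*
	 * Bit offset of the sub-word value within the aligned word: a
	 * big-endian CPU keeps the lowest-addressed byte in the most
	 * significant bits, so the offset counts down from the top of
	 * the word.
	 */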
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
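	/* Mask selecting the size-byte field at bitoff within the word. */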
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

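	/*
	 * Splice the new value into a snapshot of the containing word
	 * and publish it with a 4-byte cmpxchg; retry if another CPU
	 * changed the word in the meantime.
	 */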
	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (__cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}

static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
{
	return __xchg_cmpxchg(m, val, sizeof(*m));
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	return __xchg_cmpxchg(m, val, sizeof(*m));
}
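
/*
 * Usage sketch (illustrative only, not a real backend): the including
 * header must first provide a 4-byte compare-and-swap that returns the
 * value previously at *m (the retry loop above relies on this), along
 * the lines of:
 *
 *	static inline unsigned long __cmpxchg_u32(volatile u32 *m,
 *						  unsigned long old,
 *						  unsigned long new)
 *	{
 *		... arch-specific 32-bit compare-and-swap ...
 *	}
 *	#include <asm/cmpxchg-xchg.h>
 *
 * after which xchg_u8() and xchg_u16() above become usable. On SH the
 * provider is one of the cmpxchg-*.h variants selected by
 * <asm/cmpxchg.h>.
 */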

#endif /* __ASM_SH_CMPXCHG_XCHG_H */