/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_HELPERS_H
#define __VDSO_HELPERS_H

#ifndef __ASSEMBLY__

#include <vdso/datapage.h>

static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

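	/*
	 * An odd sequence count means that a concurrent update is in
	 * progress: spin until the writer has finished and the count
	 * is even again.
	 */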
	while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
		cpu_relax();

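	/*
	 * Pairs with the smp_wmb() in vdso_write_end(): order the load
	 * of the (even) sequence count before the subsequent data reads.
	 */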
	smp_rmb();
	return seq;
}

static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
					   u32 start)
{
	u32 seq;

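	/*
	 * Pairs with the smp_wmb() in vdso_write_begin(): order the
	 * preceding data reads before the sequence count is reloaded.
	 * A changed count means the data may be inconsistent and the
	 * read must be retried.
	 */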
	smp_rmb();
	seq = READ_ONCE(vd->seq);
	return seq != start;
}
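
/*
 * Typical read-side pattern (an illustrative sketch only, not mandated
 * by this header): sample the data between vdso_read_begin() and
 * vdso_read_retry(), and retry until a stable snapshot has been
 * observed. cycle_last serves purely as an example vdso_data field:
 *
 *	do {
 *		seq = vdso_read_begin(vd);
 *		cycles = vd->cycle_last;
 *	} while (unlikely(vdso_read_retry(vd, seq)));
 */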

static __always_inline void vdso_write_begin(struct vdso_data *vd)
{
	/*
	 * WRITE_ONCE() is required, otherwise the compiler can validly
	 * tear the updates to vd[x].seq and the value seen by the reader
	 * may be inconsistent.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
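	/*
	 * Pairs with the smp_rmb() in vdso_read_retry(): make the odd
	 * sequence counts visible before any subsequent data updates.
	 */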
	smp_wmb();
}

static __always_inline void vdso_write_end(struct vdso_data *vd)
{
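	/*
	 * Pairs with the smp_rmb() in vdso_read_begin(): make all data
	 * updates visible before the sequence counts become even again.
	 */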
	smp_wmb();
	/*
	 * WRITE_ONCE() is required, otherwise the compiler can validly
	 * tear the updates to vd[x].seq and the value seen by the reader
	 * may be inconsistent.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
}
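
/*
 * Typical write-side pattern (an illustrative sketch only): the
 * timekeeping code brackets its updates with vdso_write_begin() and
 * vdso_write_end() so that readers observe either the old or the new
 * data, never a mix. The update shown below is an assumed placeholder:
 *
 *	vdso_write_begin(vd);
 *	vd[CS_HRES_COARSE].cycle_last = cycles;
 *	vdso_write_end(vd);
 */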

#endif /* !__ASSEMBLY__ */

#endif /* __VDSO_HELPERS_H */