Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Userland implementation of gettimeofday() for 32 bits processes in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * ppc64 kernel for use in the vDSO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *                    IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <asm/ppc_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <asm/vdso.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <asm/vdso_datapage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) /* Offset for the low 32-bit part of a field of long type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #define LOPART	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #define LOPART	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 * Userland (vDSO) fast path: either pointer may be NULL, in which
 * case the corresponding result is simply not stored.  Returns 0.
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12			/* save caller's LR; the bl below clobbers it */
  .cfi_register lr,r12

	mr.	r10,r3			/* r10 saves tv; record form sets cr0 for NULL test */
	mr	r11,r4			/* r11 saves tz */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */
	beq	3f			/* tv == NULL: skip the time computation */
	LOAD_REG_IMMEDIATE(r7, 1000000)	/* load up USEC_PER_SEC: ask for microseconds */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel (r3/r4) */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	mtlr	r12			/* restore return address */
	crclr	cr0*4+so		/* clear SO bit: success per syscall ABI */
	li	r3,0			/* return value 0 */
	beqlr				/* tz == NULL: done */

	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz from the vDSO data page */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC and their _COARSE
 * variants entirely in userland; any other clock id falls back
 * to the real syscall.
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq	/* cr0.eq = precise clock */

	cmpli	cr5,r3,CLOCK_REALTIME_COARSE
	cmpli	cr6,r3,CLOCK_MONOTONIC_COARSE
	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq	/* cr5.eq = coarse clock */

	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq	/* cr0.eq = any supported id */
	bne	cr0, .Lgettime_fallback		/* unsupported: do the syscall */

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */
	LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC)	/* load up NSEC_PER_SEC */
	beq	cr5, .Lcoarse_clocks	/* coarse: read stamps, no timebase math */
.Lprecise_clocks:
	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1, .Lfinish		/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_xsec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)	/* wall-to-monotonic offset */
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter
	 */
	or	r0,r6,r5
	xor	r0,r0,r0		/* r0 = 0, but data-dependent on r5/r6 */
	add	r9,r9,r0		/* orders the count reload after the loads above */
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	.Lprecise_clocks	/* raced with a kernel update: retry */
	b	.Lfinish_monotonic

	/*
	 * For coarse clocks we get data directly from the vdso data page, so
	 * we don't need to call __do_get_tspec, but we still need to do the
	 * counter trick.
	 */
.Lcoarse_clocks:
	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1                 /* pending update ? loop */
	bne-	.Lcoarse_clocks
	add	r9,r9,r0		/* r0 is already 0; fake dependency on the count */

	/*
	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
	 * too
	 */
	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r4,STAMP_XTIME_NSEC+LOPART(r9)
	bne	cr6,1f			/* cr6 still holds the MONOTONIC_COARSE compare */

	/* CLOCK_MONOTONIC_COARSE */
	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* check if counter has updated */
	or	r0,r6,r5
1:	or	r0,r0,r3
	or	r0,r0,r4
	xor	r0,r0,r0		/* zero r0 while keeping the dependency chain */
	add	r3,r3,r0
	lwz	r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
	cmpl	cr0,r0,r8               /* check if updated */
	bne-	.Lcoarse_clocks

	/* Counter has not updated, so continue calculating proper values for
	 * sec and nsec if monotonic coarse, or just return with the proper
	 * values for realtime.
	 */
	bne	cr6, .Lfinish

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all ?
	 */
.Lfinish_monotonic:
	add	r3,r3,r5		/* sec  += wall-to-monotonic sec */
	add	r4,r4,r6		/* nsec += wall-to-monotonic nsec */
	cmpw	cr0,r4,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpwi	cr1,r4,0		/* nsec < 0 ? */
	blt	1f
	subf	r4,r7,r4		/* normalize: carry one second up */
	addi	r3,r3,1
1:	bge	cr1, .Lfinish
	addi	r3,r3,-1		/* normalize: borrow one second down */
	add	r4,r4,r7

.Lfinish:
	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: success per syscall ABI */
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
.Lgettime_fallback:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 * Coarse clocks report KTIME_LOW_RES; other ids up to CLOCK_MAX
 * report the hrtimer resolution from the vDSO data page; ids
 * above CLOCK_MAX fall back to the syscall.  res may be NULL.
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmplwi	cr0, r3, CLOCK_MAX
	cmpwi	cr1, r3, CLOCK_REALTIME_COARSE
	cmpwi	cr7, r3, CLOCK_MONOTONIC_COARSE
	bgt	cr0, 99f		/* clock_id > CLOCK_MAX: syscall fallback */
	LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)	/* default: coarse resolution */
	beq	cr1, 1f			/* coarse clocks keep KTIME_LOW_RES */
	beq	cr7, 1f

	mflr	r12
  .cfi_register lr,r12
	get_datapage	r3, r0
	lwz	r5, CLOCK_HRTIMER_RES(r3)	/* precise clocks: hrtimer resolution */
	mtlr	r12
1:	li	r3,0			/* return value 0 */
	cmpli	cr0,r4,0		/* res == NULL ? */
	crclr	cr0*4+so		/* clear SO bit: success per syscall ABI */
	beqlr				/* nothing to store */
	stw	r3,TSPC32_TV_SEC(r4)	/* res->tv_sec = 0 */
	stw	r5,TSPC32_TV_NSEC(r4)	/* res->tv_nsec = resolution in ns */
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
/*
 * Exact prototype of time()
 *
 * time_t time(time *t);
 *
 * Returns the seconds count straight from the vDSO data page's
 * xtime stamp (no timebase interpolation).  If t is non-NULL the
 * value is also stored at *t.
 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */

	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)	/* low word of xtime seconds */

	cmplwi	r11,0			/* check if t is NULL */
	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: success per syscall ABI */
	beqlr				/* t == NULL: just return the seconds */
	stw	r3,0(r11)		/* store result at *t */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
/*
 * This is the core of clock_gettime() and gettimeofday(),
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count.  An odd count means the
	 * kernel is mid-update, so spin until it becomes even.
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency (r0 = 0, derived from r8) */
	add	r9,r9,r0		/* orders the loads below after the count read */

	/* Load orig stamp (offset to TB): 64-bit value, high word first */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value: re-read the upper half until it is
	 * unchanged across the lower-half read */
2:	MFTBU(r3)
	MFTBL(r4)
	MFTBU(r0)
	cmplw	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp and shift left 12 bits.
	 */
	subfc	r4,r6,r4		/* 64-bit subtract: low words, sets carry */
	subfe	r0,r5,r3		/* high words, consuming the carry */
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31		/* fold top 12 bits of low word into high */
	slwi	r4,r4,12		/* record form above left cr0 set for beq below */

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5		/* 64-bit accumulate of the high product */
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6		/* 64-bit add of the 32.32 xtime stamp */
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6		/* r0 = 0, but data-dependent on the result */
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b			/* raced with a kernel update: redo everything */

	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
  .cfi_endproc