Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * M7memset.S: SPARC M7 optimized memset.
 *
 * Copyright (c) 2016, Oracle and/or its affiliates.  All rights reserved.
 */

/*
 * M7memset.S: M7 optimized memset.
 *
 * char *memset(sp, c, n)
 *
 * Set an array of n chars starting at sp to the character c.
 * Return sp.
 *
 * Fast assembler language version of the following C-program for memset
 * which represents the `standard' for the C-library.
 *
 *	void *
 *	memset(void *sp1, int c, size_t n)
 *	{
 *	    if (n != 0) {
 *		char *sp = sp1;
 *		do {
 *		    *sp++ = (char)c;
 *		} while (--n != 0);
 *	    }
 *	    return (sp1);
 *	}
 *
 * The algorithm is as follows:
 *
 *	For small stores of 7 or fewer bytes, the bytes are stored one at
 *	a time.
 *
 *	For stores of fewer than 32 bytes, align the address on a 4 byte
 *	boundary, then store as many 4-byte chunks as possible, followed
 *	by the trailing bytes.
 *
 *	For sizes of 32 bytes or more, align the address on an 8 byte
 *	boundary.
 *	if (count >= 64) {
 *		store 8-byte chunks to align the address on a 64 byte boundary
 *		if (value to be set is zero && count >= MIN_ZERO) {
 *			Using BIS stores, set the first long word of each
 *			64-byte cache line to zero, which also clears the
 *			other seven long words of the cache line.
 *		}
 *		else if (count >= MIN_LOOP) {
 *			Using BIS stores, set the first long word of each of
 *			ST_CHUNK cache lines (64 bytes each) before the main
 *			loop is entered.
 *			In the main loop, continue pre-setting the first long
 *			word of each cache line ST_CHUNK lines in advance while
 *			setting the other seven long words (56 bytes) of each
 *			cache line, until fewer than ST_CHUNK*64 bytes remain.
 *			Then set the remaining seven long words of each cache
 *			line that has already had its first long word set.
 *		}
 *		store the remaining data in 64-byte chunks until fewer than
 *		64 bytes remain.
 *	}
 *	Store as many 8-byte chunks as possible, followed by the trailing
 *	bytes.
 *
 * BIS = Block Init Store
 *   Doing the advance store of the first element of the cache line
 *   initiates the displacement of a cache line while only using a single
 *   instruction in the pipeline. That avoids various pipeline delays,
 *   such as filling the miss buffer. The performance effect is
 *   similar to prefetching for normal stores.
 *   The special case for zero fills runs faster and uses fewer instruction
 *   cycles than the normal memset loop.
 *
 * We only use BIS for memsets of more than MIN_LOOP bytes because a sequence
 * of BIS stores must be followed by a membar #StoreStore. The benefit of
 * the BIS stores must be balanced against the cost of the membar operation.
 */
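
/*
 * For illustration: a rough C sketch of how the 2-, 4- and 8-byte fill
 * patterns are built by replicating the fill byte (the code below does
 * this with sll/sllx and or on %o1; the name "pat" is illustrative only).
 *
 *	unsigned long pat = c & 0xff;	// 1 byte of c
 *	pat |= pat << 8;		// 2 bytes of c
 *	pat |= pat << 16;		// 4 bytes of c
 *	pat |= pat << 32;		// 8 bytes of c (unsigned long is 64-bit here)
 */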

/*
 * ASI_STBI_P marks the cache line as "least recently used"
 * which means if many threads are active, it has a high chance
 * of being pushed out of the cache between the first initializing
 * store and the final stores.
 * Thus, we use ASI_STBIMRU_P which marks the cache line as
 * "most recently used" for all but the last store to the cache line.
 */

#include <asm/asi.h>
#include <asm/page.h>

#define ASI_STBI_P      ASI_BLK_INIT_QUAD_LDD_P
#define ASI_STBIMRU_P   ASI_ST_BLKINIT_MRU_P


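/*
 * Note on the thresholds below (derived from the comments above and the
 * checks in .blkwr and .wrzero): MIN_LOOP is the smallest block-store size,
 * in bytes, for which the BIS write loop is considered to pay for the
 * trailing membar #StoreStore; MIN_ZERO is the smallest size for which the
 * zero-fill BIS path is used.
 */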
#define ST_CHUNK        24   /* multiple of 4 due to loop unrolling */
#define MIN_LOOP        16320
#define MIN_ZERO        512

	.section	".text"
	.align		32

/*
 * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE)
 * (can create a more optimized version later.)
 */
	.globl		M7clear_page
	.globl		M7clear_user_page
M7clear_page:		/* clear_page(dest) */
M7clear_user_page:
	set	PAGE_SIZE, %o1
	/* fall through into bzero code */

	.size		M7clear_page,.-M7clear_page
	.size		M7clear_user_page,.-M7clear_user_page

/*
 * Define bzero(dest, n) as memset(dest, 0, n)
 * (can create a more optimized version later.)
 */
	.globl		M7bzero
M7bzero:		/* bzero(dest, size) */
	mov	%o1, %o2
	mov	0, %o1
	/* fall through into memset code */

	.size		M7bzero,.-M7bzero

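/*
 * Register usage in M7memset, as used by the code below:
 *   %o0 - original dst pointer, preserved as the return value
 *   %o1 - fill byte, replicated into a 2-, 4- or 8-byte pattern
 *   %o2 - remaining byte count
 *   %o3 - scratch / trailing-byte count
 *   %o4 - byte count of whole 64-byte blocks
 *   %o5 - working dst pointer
 *   %g1 - loop counter / threshold scratch (ST_CHUNK, MIN_LOOP)
 *   %g3 - scratch (saved %asi, zero-fill offsets)
 */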
	.global		M7memset
	.type		M7memset, #function
	.register	%g3, #scratch
M7memset:
	mov     %o0, %o5                ! copy sp1 before using it
	cmp     %o2, 7                  ! if small counts, just write bytes
	bleu,pn %xcc, .wrchar
	 and     %o1, 0xff, %o1          ! o1 is (char)c

	sll     %o1, 8, %o3
	or      %o1, %o3, %o1           ! now o1 has 2 bytes of c
	sll     %o1, 16, %o3
	cmp     %o2, 32
	blu,pn  %xcc, .wdalign
	 or      %o1, %o3, %o1           ! now o1 has 4 bytes of c

	sllx    %o1, 32, %o3
	or      %o1, %o3, %o1           ! now o1 has 8 bytes of c

.dbalign:
	andcc   %o5, 7, %o3             ! is sp1 aligned on an 8 byte boundary?
	bz,pt   %xcc, .blkalign         ! already long word aligned
	 sub     %o3, 8, %o3             ! -(bytes till long word aligned)

	add     %o2, %o3, %o2           ! update o2 with new count
	! Set -(%o3) bytes till sp1 long word aligned
1:	stb     %o1, [%o5]              ! there is at least 1 byte to set
	inccc   %o3                     ! byte clearing loop
	bl,pt   %xcc, 1b
	 inc     %o5

	! Now sp1 is long word aligned (sp1 is found in %o5)
.blkalign:
	cmp     %o2, 64                 ! check if there are 64 bytes to set
	blu,pn  %xcc, .wrshort
	 mov     %o2, %o3

	andcc   %o5, 63, %o3            ! is sp1 block aligned?
	bz,pt   %xcc, .blkwr            ! now block aligned
	 sub     %o3, 64, %o3            ! o3 is -(bytes till block aligned)
	add     %o2, %o3, %o2           ! o2 is the remainder

	! Store -(%o3) bytes till dst is block (64 byte) aligned.
	! Use long word stores.
	! Recall that dst is already long word aligned
1:
	addcc   %o3, 8, %o3
	stx     %o1, [%o5]
	bl,pt   %xcc, 1b
	 add     %o5, 8, %o5

	! Now sp1 is block aligned
.blkwr:
	andn    %o2, 63, %o4            ! calculate size of blocks in bytes
	brz,pn  %o1, .wrzero            ! special case if c == 0
	 and     %o2, 63, %o3            ! %o3 = bytes left after blk stores.

	set     MIN_LOOP, %g1
	cmp     %o4, %g1                ! check there are enough bytes to set
	blu,pn  %xcc, .short_set        ! to justify cost of membar
	                                ! must be > pre-cleared lines
	 nop

	! initial cache-clearing stores
	! get store pipeline moving
	rd	%asi, %g3		! save %asi to be restored later
	wr     %g0, ASI_STBIMRU_P, %asi

	! Primary memset loop for large memsets
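	! Each pass of .wr_loop_start below pre-sets the first long word of
	! four consecutive cache lines (256 bytes), so ST_CHUNK/4 passes
	! cover ST_CHUNK lines; .wr_loop_rest then fills in the remaining
	! seven long words of each line already started.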
.wr_loop:
	sub     %o5, 8, %o5		! adjust %o5 for ASI store alignment
	mov     ST_CHUNK, %g1
.wr_loop_start:
	stxa    %o1, [%o5+8]%asi
	subcc   %g1, 4, %g1
	stxa    %o1, [%o5+8+64]%asi
	add     %o5, 256, %o5
	stxa    %o1, [%o5+8-128]%asi
	bgu     %xcc, .wr_loop_start
	 stxa    %o1, [%o5+8-64]%asi

	sub     %o5, ST_CHUNK*64, %o5	! reset %o5
	mov     ST_CHUNK, %g1

.wr_loop_rest:
	stxa    %o1, [%o5+8+8]%asi
	sub     %o4, 64, %o4
	stxa    %o1, [%o5+16+8]%asi
	subcc   %g1, 1, %g1
	stxa    %o1, [%o5+24+8]%asi
	stxa    %o1, [%o5+32+8]%asi
	stxa    %o1, [%o5+40+8]%asi
	add     %o5, 64, %o5
	stxa    %o1, [%o5-8]%asi
	bgu     %xcc, .wr_loop_rest
	 stxa    %o1, [%o5]ASI_STBI_P

	! If more than ST_CHUNK*64 bytes remain to set, continue
	! setting the first long word of each cache line in advance
	! to keep the store pipeline moving.

	cmp     %o4, ST_CHUNK*64
	bge,pt  %xcc, .wr_loop_start
	 mov     ST_CHUNK, %g1

	brz,a,pn %o4, .asi_done
	 add     %o5, 8, %o5             ! restore %o5 offset

.wr_loop_small:
	stxa    %o1, [%o5+8]%asi
	stxa    %o1, [%o5+8+8]%asi
	stxa    %o1, [%o5+16+8]%asi
	stxa    %o1, [%o5+24+8]%asi
	stxa    %o1, [%o5+32+8]%asi
	subcc   %o4, 64, %o4
	stxa    %o1, [%o5+40+8]%asi
	add     %o5, 64, %o5
	stxa    %o1, [%o5-8]%asi
	bgu,pt  %xcc, .wr_loop_small
	 stxa    %o1, [%o5]ASI_STBI_P

	ba      .asi_done
	 add     %o5, 8, %o5             ! restore %o5 offset

	! Special case loop for zero fill memsets
	! For each 64 byte cache line, single STBI to first element
	! clears line
.wrzero:
	cmp     %o4, MIN_ZERO           ! check if enough bytes to set
					! to pay %asi + membar cost
	blu     %xcc, .short_set
	 nop
	sub     %o4, 256, %o4

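	! Each pass of .wrzero_loop below issues one BIS store to the first
	! long word of each of four 64-byte lines, clearing 256 bytes per
	! iteration; .wrzero_small handles the remaining whole lines.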
.wrzero_loop:
	mov     64, %g3
	stxa    %o1, [%o5]ASI_STBI_P
	subcc   %o4, 256, %o4
	stxa    %o1, [%o5+%g3]ASI_STBI_P
	add     %o5, 256, %o5
	sub     %g3, 192, %g3
	stxa    %o1, [%o5+%g3]ASI_STBI_P
	add %g3, 64, %g3
	bge,pt  %xcc, .wrzero_loop
	 stxa    %o1, [%o5+%g3]ASI_STBI_P
	add     %o4, 256, %o4

	brz,pn  %o4, .bsi_done
	 nop

.wrzero_small:
	stxa    %o1, [%o5]ASI_STBI_P
	subcc   %o4, 64, %o4
	bgu,pt  %xcc, .wrzero_small
	 add     %o5, 64, %o5
	ba,a	.bsi_done

.asi_done:
	wr	%g3, 0x0, %asi		! restore saved %asi
.bsi_done:
	membar  #StoreStore             ! required by use of Block Store Init

.short_set:
	cmp     %o4, 64                 ! check if 64 bytes to set
	blu     %xcc, 5f
	 nop
4:                                      ! set final blocks of 64 bytes
	stx     %o1, [%o5]
	stx     %o1, [%o5+8]
	stx     %o1, [%o5+16]
	stx     %o1, [%o5+24]
	subcc   %o4, 64, %o4
	stx     %o1, [%o5+32]
	stx     %o1, [%o5+40]
	add     %o5, 64, %o5
	stx     %o1, [%o5-16]
	bgu,pt  %xcc, 4b
	 stx     %o1, [%o5-8]

5:
	! Set the remaining long words
.wrshort:
	subcc   %o3, 8, %o3             ! Can we store any long words?
	blu,pn  %xcc, .wrchars
	 and     %o2, 7, %o2             ! calc bytes left after long words
6:
	subcc   %o3, 8, %o3
	stx     %o1, [%o5]              ! store the long words
	bgeu,pt %xcc, 6b
	 add     %o5, 8, %o5

.wrchars:                               ! check for extra chars
	brnz    %o2, .wrfin
	 nop
	retl
	 nop

.wdalign:
	andcc   %o5, 3, %o3             ! is sp1 aligned on a word boundary?
	bz,pn   %xcc, .wrword
	 andn    %o2, 3, %o3             ! create word sized count in %o3

	dec     %o2                     ! decrement count
	stb     %o1, [%o5]              ! clear a byte
	b       .wdalign
	 inc     %o5                     ! next byte

.wrword:
	subcc   %o3, 4, %o3
	st      %o1, [%o5]              ! 4-byte writing loop
	bnz,pt  %xcc, .wrword
	 add     %o5, 4, %o5

	and     %o2, 3, %o2             ! leftover count, if any

.wrchar:
	! Set the remaining bytes, if any
	brz     %o2, .exit
	 nop
.wrfin:
	deccc   %o2
	stb     %o1, [%o5]
	bgu,pt  %xcc, .wrfin
	 inc     %o5
.exit:
	retl                            ! %o0 was preserved
	 nop

	.size		M7memset,.-M7memset