/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non-DMA-coherent systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_CPU_MIPSR6
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
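
/*
 * For illustration only: a minimal C-level sketch of the contract above.
 * This is not the kernel implementation; the function name
 * __copy_user_sketch and the plain byte loop are illustrative assumptions.
 *
 *	// Returns the number of bytes NOT copied (0 on success).  In the
 *	// real __copy_user a faulting load or store branches to a fixup
 *	// handler that computes this residue, instead of the loop simply
 *	// running to completion.
 *	static size_t __copy_user_sketch(void *dst, const void *src, size_t len)
 *	{
 *		unsigned char *d = dst;
 *		const unsigned char *s = src;
 *
 *		while (len) {
 *			*d++ = *s++;	// either access may fault
 *			len--;
 *		}
 *		return len;		// bytes left uncopied
 *	}
 */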

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
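
/*
 * A hedged sketch of the arithmetic behind invariants (1)-(3): because AT
 * holds src_entry + len_entry and (dst - src) stays equal to
 * (dst_entry - src_entry), the load fixup can recover the residue from the
 * faulting address alone.  The names below (src_end, bad_addr,
 * uncopied_after_load_fault) are illustrative, not symbols used by this file.
 *
 *	// src_end  = src_entry + len_entry  (what AT holds)
 *	// bad_addr = first address that could not be read
 *	//            (THREAD_BUADDR, "just past the last good address")
 *	static unsigned long uncopied_after_load_fault(unsigned long src_end,
 *						       unsigned long bad_addr)
 *	{
 *		return src_end - bad_addr;	// what .Ll_exc stores in len
 *	}
 */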

/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2
#define LEGACY_MODE 1
#define EVA_MODE    2
#define USEROP   1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */

#define EXC(insn, type, reg, addr, handler)			\
	.if \mode == LEGACY_MODE;				\
9:		insn reg, addr;					\
		.section __ex_table,"a";			\
		PTR	9b, handler;				\
		.previous;					\
	/* This is assembled in EVA mode */			\
	.else;							\
		/* If loading from user or storing to user */	\
		.if ((\from == USEROP) && (type == LD_INSN)) ||	\
		    ((\to == USEROP) && (type == ST_INSN));	\
9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
			.section __ex_table,"a";		\
			PTR	9b, handler;			\
			.previous;				\
		.else;						\
			/*					\
			 * Still in EVA, but no need for	\
			 * exception handler or EVA insn	\
			 */					\
			insn reg, addr;				\
		.endif;						\
	.endif

/*
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SRA    sra
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)

#ifdef CONFIG_CPU_HAS_PREFETCH
# define _PREF(hint, addr, type)					\
	.if \mode == LEGACY_MODE;					\
		kernel_pref(hint, addr);				\
	.else;								\
		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
		    ((\to == USEROP) && (type == DST_PREFETCH));	\
			/*						\
			 * PREFE has only 9 bits for the offset		\
			 * compared to PREF which has 16, so it may	\
			 * need to use the $at register but this	\
			 * register should remain intact because it's	\
			 * used later on. Therefore use $v1.		\
			 */						\
			.set at=v1;					\
			user_pref(hint, addr);				\
			.set noat;					\
		.else;							\
			kernel_pref(hint, addr);			\
		.endif;							\
	.endif
#else
# define _PREF(hint, addr, type)
#endif

#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)
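
/*
 * A worked example of the macros above, assuming the 64-bit case
 * (NBYTES == 8); the C fragment and the helper name are illustrative only:
 *
 *	// FIRST(1) == 8, REST(1) == 15: unit 1 spans bytes 8..15, so the
 *	// LDFIRST/LDREST pair touches the two ends of that unit when the
 *	// source is unaligned.
 *	// ADDRMASK == 7: a pointer is NBYTES-aligned iff (addr & 7) == 0.
 *	static int is_nbytes_aligned(unsigned long addr)
 *	{
 *		return (addr & 7) == 0;		// addr & ADDRMASK
 *	}
 */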

	.text
	.set	noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

	.align	5

/*
 * Macro to build the __copy_user common code
 * Arguments:
 * mode : LEGACY_MODE or EVA_MODE
 * from : Source operand. USEROP or KERNELOP
 * to   : Destination operand. USEROP or KERNELOP
 */
	.macro __BUILD_COPY_USER mode, from, to

	/* initialize __memcpy if this is the first time we execute this macro */
	.ifnotdef __memcpy
	.set __memcpy, 1
	.hidden __memcpy /* make sure it does not leak */
	.endif

	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREFS(	0, 0(src) )
	PREFD(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREFS(	0, 1*32(src) )
	PREFD(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen\@
	 and	t0, src, ADDRMASK
	PREFS(	0, 2*32(src) )
	PREFD(	1, 2*32(dst) )
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
	bnez	t1, .Ldst_unaligned\@
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
	or	t0, t0, t1
	bnez	t0, .Lcopy_unaligned_bytes\@
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned\@:
	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	 and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	PREFS(	0, 3*32(src) )
	PREFD(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
	STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
	PREFS(	0, 8*32(src) )
	PREFD(	1, 8*32(dst) )
	bne	len, rem, 1b
	 nop

	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned\@:
	beqz	len, .Ldone\@
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src), .Ll_exc\@)
	LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone\@
	.set	noreorder
.Lless_than_4units\@:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, 0(src), .Ll_exc\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because we can't assume read access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
#define bits t2
	beqz	len, .Ldone\@
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	jr	ra
	 move	len, zero
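
	/*
	 * The tail trick above in rough C terms (illustrative only; the
	 * helper name copy_tail_sketch is an assumption, not a symbol used
	 * by this file).  The assembly gets the same effect branch-free:
	 * one full-word LOAD, SHIFT_DISCARD to drop the unwanted bits, and
	 * a single partial STREST, so dst is never read.
	 *
	 *	// rem < NBYTES bytes remain; src and dst are NBYTES-aligned.
	 *	static void copy_tail_sketch(unsigned char *dst,
	 *				     const unsigned char *src,
	 *				     size_t rem)
	 *	{
	 *		for (size_t i = 0; i < rem; i++)
	 *			dst[i] = src[i];	// write exactly rem bytes
	 *	}
	 */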
.Ldst_unaligned\@:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	beq	len, t2, .Ldone\@
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned\@
	 ADD	src, src, t2
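
	/*
	 * In C terms, the head-alignment step above computes the following
	 * (sketch only; bytes_to_align_dst is an illustrative name, not a
	 * symbol defined by this file):
	 *
	 *	static size_t bytes_to_align_dst(unsigned long src,
	 *					 unsigned long dst)
	 *	{
	 *		size_t t0 = src & ADDRMASK;	// src misalignment
	 *		size_t t1 = dst & ADDRMASK;	// dst misalignment, > 0 here
	 *		size_t match = t0 ^ t1;		// 0 => same misalignment
	 *
	 *		(void)match;
	 *		return NBYTES - t1;		// t2 in the code above
	 *	}
	 *
	 * After copying t2 bytes dst is NBYTES-aligned; if match is zero,
	 * src is aligned too and the code re-enters the .Lboth_aligned word
	 * loop, otherwise it takes the src-unaligned/dst-aligned path.
	 */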

.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREFS(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned\@
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREFD(	1, 3*32(dst) )
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned\@:
	beqz	len, .Ldone\@
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
	beqz	len, .Ldone\@
	 nop
.Lcopy_bytes\@:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
	LOADB(t0, N(src), .Ll_exc\@);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone\@;		\
	 STOREB(t0, N(dst), .Ls_exc_p1\@)

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
	SUB	len, len, 1
	jr	ra
	 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
	jr	ra
	 nop

#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
.Lcopy_unaligned_bytes\@:
1:
	COPY_BYTE(0)
	COPY_BYTE(1)
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
	COPY_BYTE(6)
	COPY_BYTE(7)
	ADD	src, src, 8
	b	1b
	 ADD	dst, dst, 8
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
	.if __memcpy == 1
	END(memcpy)
	.set __memcpy, 0
	.hidden __memcpy
	.endif

.Ll_exc_copy\@:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
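	/*
	 * The recovery loop below, as a hedged C sketch (the function name
	 * salvage_before_fault is illustrative; buaddr stands for the value
	 * read from THREAD_BUADDR($28)):
	 *
	 *	// Re-copy byte by byte up to the recorded fault boundary so
	 *	// the destination holds every byte that was readable, then
	 *	// fall through to the .Ll_exc residue computation.
	 *	static void salvage_before_fault(unsigned char *dst,
	 *					 const unsigned char *src,
	 *					 const unsigned char *buaddr)
	 *	{
	 *		while (src != buaddr)
	 *			*dst++ = *src++;	// stores cannot fault here
	 *	}
	 */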
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)
1:
	LOADB(t1, 0(src), .Ll_exc\@)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc\@:
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len number of uncopied bytes
	jr	ra
	 nop

#define SEXC(n)							\
	.set	reorder;			/* DADDI_WAR */	\
.Ls_exc_p ## n ## u\@:						\
	ADD	len, len, n*NBYTES;				\
	jr	ra;						\
	.set	noreorder

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1\@:
	.set	reorder				/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc\@:
	jr	ra
	 nop
	.endm

#ifndef CONFIG_HAVE_PLAT_MEMCPY
	.align	5
LEAF(memmove)
EXPORT_SYMBOL(memmove)
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, .L__memcpy
	 move	v0, a0				/* return value */
	beqz	a2, .Lr_out
	END(memmove)
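
/*
 * The overlap test above in C form, for reference (sketch only; the real
 * memmove falls through to __rmemcpy below rather than calling any helper,
 * and memmove_sketch is an illustrative name assuming <string.h>):
 *
 *	void *memmove_sketch(void *dst, const void *src, size_t len)
 *	{
 *		unsigned long d = (unsigned long)dst;
 *		unsigned long s = (unsigned long)src;
 *		unsigned char *dp = dst;
 *		const unsigned char *sp = src;
 *
 *		// No harmful overlap: a plain forward memcpy is safe
 *		// (the "beqz t0, .L__memcpy" case above).
 *		if (d + len <= s || s + len <= d)
 *			return memcpy(dst, src, len);
 *
 *		if (d > s)			// what __rmemcpy does:
 *			while (len--)		// copy backwards
 *				dp[len] = sp[len];
 *		else
 *			for (size_t i = 0; i < len; i++)
 *				dp[i] = sp[i];	// copy forwards
 *		return dst;
 *	}
 */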

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, .Lr_end_bytes_up		# src >= dst
	 nop
	ADD	a0, a2				# dst = dst + len
	ADD	a1, a2				# src = src + len

.Lr_end_bytes:
	R10KCBARRIER(0(ra))
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	SUB	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes
	.set	noreorder

.Lr_out:
	jr	ra
	 move	a2, zero

.Lr_end_bytes_up:
	R10KCBARRIER(0(ra))
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	ADD	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes_up
	.set	noreorder

	jr	ra
	 move	a2, zero
	END(__rmemcpy)

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
	.align	5
LEAF(memcpy)					/* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
	move	v0, dst				/* return value */
.L__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
	/* Legacy Mode, user <-> user */
	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP

#endif /* !CONFIG_HAVE_PLAT_MEMCPY */

#ifdef CONFIG_EVA

/*
 * For EVA we need distinct symbols for reading and writing to user space.
 * This is because we need to use specific EVA instructions to perform the
 * virtual <-> physical translation when a virtual address is actually in
 * user space.
 */

/*
 * __copy_from_user (EVA)
 */

LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva)
	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)


/*
 * __copy_to_user (EVA)
 */

LEAF(__copy_to_user_eva)
EXPORT_SYMBOL(__copy_to_user_eva)
	__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
END(__copy_to_user_eva)

/*
 * __copy_in_user (EVA)
 */

LEAF(__copy_in_user_eva)
EXPORT_SYMBOL(__copy_in_user_eva)
	__BUILD_COPY_USER EVA_MODE USEROP USEROP
END(__copy_in_user_eva)

#endif /* CONFIG_EVA */