// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/copypage-v4wt.c
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This is for CPUs with a writethrough cache and 'flush ID cache' is
 * the only supported cache operation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * Since we have writethrough caches, we don't have to worry about
 * dirty data in the cache.  However, we do have to ensure that
 * subsequent reads are up to date.
 */
static void v4wt_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

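	/*
	 * Copy the page 64 bytes per loop iteration: each ldmia/stmia
	 * pair moves four 32-bit words (16 bytes) and four pairs run
	 * per pass, so the loop executes PAGE_SIZE / 64 times.  The
	 * first ldmia preloads data before the loop is entered, and the
	 * final ldmia is conditional on "ne" so the last iteration does
	 * not read past the end of the source page.  The trailing mcr
	 * invalidates the I and D caches, which is what keeps
	 * subsequent reads up to date on these writethrough cores.
	 */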
	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %2, c7, c7, 0		@ flush ID cache"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	v4wt_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
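
/*
 * Illustrative only: a minimal sketch of how the generic mm layer is
 * expected to reach this routine.  On ARM, copy_user_highpage() should
 * expand to an indirect call through the per-CPU cpu_user_fns table;
 * the exact plumbing lives in asm/page.h and is an assumption here,
 * not something defined in this file:
 *
 *	struct page *dst, *src;		// hypothetical caller state
 *	unsigned long vaddr;
 *	struct vm_area_struct *vma;
 *
 *	copy_user_highpage(dst, src, vaddr, vma);
 *	// -> cpu_user.cpu_copy_user_highpage(dst, src, vaddr, vma)
 *	// -> v4wt_copy_user_highpage() on a v4 writethrough CPU
 */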

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
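	/*
	 * Zero the page 64 bytes per iteration: r2, r3, ip and lr are
	 * preloaded with zero and stored four times per pass, while r1
	 * counts down the PAGE_SIZE / 64 iterations.  As in the copy
	 * path, the trailing "flush ID cache" mcr makes the freshly
	 * written zeroes visible to subsequent reads.
	 */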
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4wt_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
};
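
/*
 * A sketch of how this table is consumed, for orientation.  The
 * structure is __initdata because, on kernels built for more than one
 * CPU type, boot-time setup code is expected to copy the matching
 * processor's cpu_user_fns into a global and then discard this copy.
 * The names below follow the MULTI_USER convention and are assumptions
 * based on code outside this file, not definitions made here:
 *
 *	#ifdef MULTI_USER
 *	cpu_user = *list->user;	// list: the matched proc_info entry
 *	#endif
 */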