// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/cache.c
 *
 * Instruction cache handling
 *
 * Copyright (C) 1995 Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

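/*
 * Translate a kernel virtual address into a physical one by asking the
 * MMU directly: PLPAR on the '060, PTESTR plus the MMU status register
 * on the '040, and a PTESTR table search plus a walk of the returned
 * descriptor on '020/'030.  Returns 0 if no translation exists.
 */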
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      " .even\n"
			      "3: sub.l %0,%0\n"
			      " jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      " .align 4\n"
			      " .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

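		/* PTESTR searches the translation tables for a read access and
		 * leaves the result in the MMU status register; when the
		 * resident bit is set, MMUSR also carries the physical page
		 * address. */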
		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

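		/* On '020/'030, PTESTR searches down to the deepest table level
		 * (#7) and returns the address of the last descriptor fetched;
		 * PMOVE saves the MMU status so we can check for invalid, bus
		 * error or limit violations.  get_fs().seg supplies the
		 * function code, i.e. which address space to search. */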
		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1"
			      : "=a&" (descaddr), "=m" (mmusr)
			      : "a" (vaddr), "d" (get_fs().seg));
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
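		/* MMU_NUM is the number of table levels the search used; an
		 * early-terminated search means the descriptor maps a larger
		 * region, hence the wider masks below (32MB at level 1, 256KB
		 * at level 2, a single page at level 3). */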
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
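		/* The addresses have been reduced to cache set indices; if the
		 * range wraps past the top of the index space, flush it in two
		 * pieces. */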
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

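		/* CPUSHP pushes and invalidates both caches (%bc) one physical
		 * page at a time, so translate each virtual page before
		 * pushing it. */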
		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
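		/* '020/'030: just invalidate the whole instruction cache by
		 * setting FLUSH_I in the cache control register. */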
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

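/*
 * Kernel-address variant: switch to KERNEL_DS for the duration so that
 * get_fs().seg, used by virt_to_phys_slow() on '020/'030, refers to the
 * kernel address space.
 */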
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	flush_icache_user_range(address, endaddr);
	set_fs(old_fs);
}
EXPORT_SYMBOL(flush_icache_range);

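/*
 * Flush the instruction cache for a single user page, e.g. after the
 * kernel has written code into it on another process's behalf: on
 * ColdFire only the affected sets are flushed, on '040/'060 the page is
 * pushed by its physical address, and elsewhere the whole instruction
 * cache is invalidated.
 */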
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);

	} else if (CPU_IS_040_OR_060) {
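		/* The page's physical address is known here, so push it with
		 * CPUSHP directly; the whole page is flushed even if len only
		 * covers part of it. */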
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}