/*
 * TLB miss handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled from the low-level TLB miss entry
 * path. Returns 0 if the miss was handled here, or nonzero to make
 * the caller fall back to the full page fault path.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		/* Kernel P3 address: walk the kernel page tables. */
		pgd = pgd_offset_k(address);
	} else {
		/*
		 * User address: bail out to the full fault path if it
		 * is out of range or there is no mm to walk (e.g. a
		 * kernel thread).
		 */
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

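	/*
	 * Walk the remaining levels of the page table by hand; a
	 * missing or bad entry at any level means the miss cannot be
	 * resolved here.
	 */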
	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
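	/* Only a present PTE can be loaded into the TLB. */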
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
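	/*
	 * A nonzero error_code indicates a write access; writing to a
	 * page that is not writable (e.g. copy-on-write) needs the
	 * full fault path.
	 */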
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

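	/*
	 * Mark the page accessed, and dirty on a write, then write the
	 * updated PTE back before it is loaded into the TLB.
	 */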
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

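	/*
	 * Record the fault code for this thread and preload the
	 * updated PTE into the TLB.
	 */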
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}