// SPDX-License-Identifier: GPL-2.0
/*
 * Xtensa MMU support.
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

#if defined(CONFIG_HIGHMEM)
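/*
 * Allocate zeroed PTE pages from low memory for @n_pages pages of kernel
 * virtual space starting at @vaddr and hook them into the kernel page
 * table.  Returns a pointer to the first PTE.
 */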
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pmd_t *pmd = pmd_off_k(vaddr);
	pte_t *pte;
	unsigned long i;

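	/* Round up so whole PTE pages (PTRS_PER_PTE entries each) are used. */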
	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %lu\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

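	/* Point each pmd entry at its PTE page and verify the linkage. */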
	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}

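/* Cover the fixmap area (__fix_to_virt()) with PTE-backed mappings. */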
static void __init fixedrange_init(void)
{
	init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
}
#endif

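/*
 * With CONFIG_HIGHMEM the fixmap and pkmap areas need page-table-backed
 * mappings; without it there is nothing to set up here.
 */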
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
	kmap_init();
#endif
}

/*
 * Flush the MMU and reset the associated registers to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in these registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	init_kio();
	local_flush_tlb_all();

	/* Set the RASID register to a known value. */
	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set the PTEVADDR special register to the start of the page
	 * table, which is in kernel-mappable space (i.e. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed.
	 */
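	/*
	 * The "+ 6" selects TLB way 6, which holds the fixed 256MB KIO
	 * mappings set up in initialize_mmu.h; the second argument of
	 * write_{i,d}tlb_entry() encodes both the vaddr and the way.
	 */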
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}