/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although used in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits.
 *   This is a must for a 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from an 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest (7K) is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/bits.h>
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * The ARC700 MMU only deals with software-managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have a different value in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 * in separate PD0 and PD1, which combined form a translation entry),
 * while from the PTE perspective they are 8 and 9 respectively.
 * With MMU v3: most bits (except SHARED) represent the exact hardware position
 * (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE	0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF		(_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)
/* More Abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel itself runs from untranslated space, vmalloc/modules use a
 * chunk of user vaddr space - visible in all addr spaces, but kernel mode only.
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
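
/*
 * Illustration only (not part of the interface): the software TLB refill
 * handler conceptually splits a PTE across the two TLB "Page Descriptors"
 * roughly as sketched below. tlb_pd0/tlb_pd1 are hypothetical locals, shown
 * merely to indicate which PTE bits end up in which descriptor.
 *
 *	tlb_pd0 = (vaddr & PAGE_MASK) | (pte & PTE_BITS_IN_PD0);
 *	tlb_pd1 = pte & (PTE_BITS_NON_RWX_IN_PD1 | PTE_BITS_RWX);
 */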

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have a 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W unlike some other CPUs), still to
 *    keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable the COW mechanism (see the worked example below)
 */
	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R
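
/*
 * Worked example of the Pvt-W => !W rule (illustrative, not kernel API):
 * a private writable mapping such as
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * selects __P011, which per the table above resolves to PAGE_U_R, i.e. the
 * PTE starts out without _PAGE_WRITE. The first store therefore takes a
 * protection fault; the generic COW path copies the page if needed and
 * re-installs the PTE with write permission, conceptually via
 * pte_mkwrite()/pte_mkdirty() defined further below.
 */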

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]		    32 bit virtual address		     [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ----------> |
 * |               |                                       |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT -->   |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed,
 * so both PGD and PTE sizing can be tweaked
 *  e.g. an 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21  -> 11:8:13 address split
 *  - PGDIR_SHIFT 24  -> 8:11:13 address split
 *
 * If a Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only normal page support, so "hackable" (see comment above).
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	BIT(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define PTRS_PER_PTE	BIT(BITS_FOR_PTE)
#define PTRS_PER_PGD	BIT(BITS_FOR_PGD)
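
/*
 * Worked numbers for the default split (illustrative): with 8K pages
 * (PAGE_SHIFT = 13) and PGDIR_SHIFT = 21:
 *
 *	BITS_FOR_PTE = 21 - 13 = 8   =>  PTRS_PER_PTE = 256
 *	BITS_FOR_PGD = 32 - 21 = 11  =>  PTRS_PER_PGD = 2048
 *	PGDIR_SIZE   = 1 << 21       =>  each PGD entry spans 2 MB of vaddr
 *
 * i.e. the 11:8:13 split mentioned above; with 4K pages the same PGDIR_SHIFT
 * yields 11:9:12.
 */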

/*
 * Number of PGD entries that a user-land program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_leaf(x)			(pmd_val(x) & _PAGE_HW_SZ)
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,		&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,		|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,		&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,		|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,		&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,		|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,		&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,		|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,		|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,		|= (_PAGE_HW_SZ));
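
/*
 * For reference, each PTE_BIT_FUNC() line above expands to a tiny helper;
 * e.g. PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) becomes:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 */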

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
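
/*
 * Usage note (sketch): pte_modify() lets protection-change paths such as
 * mprotect() swap in a new protection while preserving everything in
 * _PAGE_CHG_MASK, i.e. the physical frame (PAGE_MASK_PHYS) plus
 * ACCESSED/DIRTY/SPECIAL. Making a page read-only could conceptually be:
 *
 *	pte = pte_modify(pte, PAGE_U_R);
 */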

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes a register read
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task.
 * Thus use this macro only when you are certain that "current" owns the mm
 * being dealt with, e.g. in signal frame setup code etc
 */
#ifdef ARC_USE_SCRATCH_REG
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * The low 13 bits are reserved but only 5 of them hold @type (bits 4:0);
 * bits 12-5 are kept zero, ensuring that _PAGE_PRESENT is zero in a PTE
 * holding a swap "identifier". The offset starts at bit 13.
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
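
/*
 * Worked example (illustrative values only): __swp_entry(2, 0x40) yields
 * (2 & 0x1f) | (0x40 << 13) = 0x80002. Bits 12-5 are zero, so _PAGE_PRESENT
 * (bit 9 or 10 depending on MMU version) is clear and pte_present() is false
 * for such a "PTE"; __swp_type() recovers 2 and __swp_offset() recovers 0x40.
 */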

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASSEMBLY__ */

#endif