Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * OpenRISC Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Linux architectural port borrowing liberally from similar works of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * others.  All original copyrights apply as per the original source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * declaration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * OpenRISC implementation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * et al.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) /* or32 pgtable.h - macros and functions to manipulate page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * Based on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * include/asm-cris/pgtable.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #ifndef __ASM_OPENRISC_PGTABLE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #define __ASM_OPENRISC_PGTABLE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm-generic/pgtable-nopmd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/fixmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * The Linux memory management assumes a three-level page table setup. On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * or32, we use that, but "fold" the mid level into the top-level page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * table. Since the MMU TLB is software loaded through an interrupt, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * supports any page table structure, so we could have used a three-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * setup, but for the amounts of memory we normally use, a two-level is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * probably more efficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * This file contains the functions and defines necessary to modify and use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * the or32 page table tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) extern void paging_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) /* Certain architectures need to do special things when pte's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  * within a page table are directly modified.  Thus, the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  * hook is made available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * (pmds are folded into pgds so this doesn't get actually called,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * but the define is needed for a generic inline function.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define PGDIR_MASK	(~(PGDIR_SIZE-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  * entries per page directory level: we use a two-level, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * we don't really have any PMD directory physically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * pointers are 4 bytes so we can use the page size and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * divide it by 4 (shift by 2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) #define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) #define PTRS_PER_PGD	(1UL << (32-PGDIR_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) /* calculate how many PGD entries a user-level program can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * the first mappable virtual address is 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  * (TASK_SIZE is the maximum virtual address space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) #define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) #define FIRST_USER_ADDRESS      0UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79)  * Kernels own virtual memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * The size and location of the vmalloc area are chosen so that modules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * placed in this area aren't more than a 28-bit signed offset from any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * kernel functions that they may need.  This greatly simplifies handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  * of the relocations for l.j and l.jal instructions as we don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87)  * introduce any trampolines for reaching "distant" code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89)  * 64 MB of vmalloc area is comparable to what's available on other arches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) #define VMALLOC_START	(PAGE_OFFSET-0x04000000UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) #define VMALLOC_END	(PAGE_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) #define VMALLOC_VMADDR(x) ((unsigned long)(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) /* Define some higher level generic page attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * If you change _PAGE_CI definition be sure to change it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * io.h for ioremap() too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  * An OR32 PTE looks like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  * |  31 ... 10 |  9  |  8 ... 6  |  5  |  4  |  3  |  2  |  1  |  0  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  *  Phys pg.num    L     PP Index    D     A    WOM   WBC   CI    CC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  *  L  : link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  *  PPI: Page protection index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  *  D  : Dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  *  A  : Accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  *  WOM: Weakly ordered memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  *  WBC: Write-back cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)  *  CI : Cache inhibit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)  *  CC : Cache coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)  * The protection bits below should correspond to the layout of the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)  * PTE as per above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define _PAGE_CC       0x001 /* software: pte contains a translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define _PAGE_CI       0x002 /* cache inhibit          */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define _PAGE_WBC      0x004 /* write back cache       */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define _PAGE_WOM      0x008 /* weakly ordered memory  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define _PAGE_A        0x010 /* accessed               */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define _PAGE_D        0x020 /* dirty                  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define _PAGE_URE      0x040 /* user read enable       */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define _PAGE_UWE      0x080 /* user write enable      */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define _PAGE_SRE      0x100 /* superuser read enable  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define _PAGE_SWE      0x200 /* superuser write enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define _PAGE_EXEC     0x400 /* software: page is executable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* 0x001 is cache coherency bit, which should always be set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  *       1 - for SMP (when we support it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  *       0 - otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * we just reuse this bit in software for _PAGE_PRESENT and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  * force it to 0 when loading it into TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define _PAGE_PRESENT  _PAGE_CC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define _PAGE_USER     _PAGE_URE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define _PAGE_WRITE    (_PAGE_UWE | _PAGE_SWE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define _PAGE_DIRTY    _PAGE_D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define _PAGE_ACCESSED _PAGE_A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define _PAGE_NO_CACHE _PAGE_CI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define _PAGE_SHARED   _PAGE_U_SHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define _PAGE_READ     (_PAGE_URE | _PAGE_SRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define _PAGE_ALL      (_PAGE_PRESENT | _PAGE_ACCESSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define _KERNPG_TABLE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define PAGE_NONE       __pgprot(_PAGE_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define PAGE_READONLY   __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define PAGE_SHARED \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		 | _PAGE_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #define PAGE_SHARED_X \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		 | _PAGE_SHARED | _PAGE_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define PAGE_COPY       __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define PAGE_COPY_X     __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define PAGE_KERNEL \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define PAGE_KERNEL_RO \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	__pgprot(_PAGE_ALL | _PAGE_SRE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define PAGE_KERNEL_NOCACHE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) #define __P000	PAGE_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #define __P001	PAGE_READONLY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #define __P010	PAGE_COPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #define __P011	PAGE_COPY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define __P100	PAGE_READONLY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define __P101	PAGE_READONLY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define __P110	PAGE_COPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define __P111	PAGE_COPY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #define __S000	PAGE_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define __S001	PAGE_READONLY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #define __S010	PAGE_SHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #define __S011	PAGE_SHARED_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define __S100	PAGE_READONLY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define __S101	PAGE_READONLY_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define __S110	PAGE_SHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #define __S111	PAGE_SHARED_X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* zero page used for uninitialized stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) extern unsigned long empty_zero_page[2048];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) /* number of bits that fit into a memory pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #define BITS_PER_PTR			(8*sizeof(unsigned long))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /* to align the pointer to a pointer address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) #define PTR_MASK			(~(sizeof(void *)-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /* 64-bit machines, beware!  SRB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) #define SIZEOF_PTR_LOG2			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /* to find an entry in a page-table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) #define PAGE_PTR(address) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* to set the page-dir */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) #define SET_PAGE_DIR(tsk, pgdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) #define pte_none(x)	(!pte_val(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) #define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) #define pmd_none(x)	(!pmd_val(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) #define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) #define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  * The following only work if pte_present() is true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  * Undefined behaviour if not..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_READ; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static inline pte_t pte_wrprotect(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	pte_val(pte) &= ~(_PAGE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static inline pte_t pte_rdprotect(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	pte_val(pte) &= ~(_PAGE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static inline pte_t pte_exprotect(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	pte_val(pte) &= ~(_PAGE_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static inline pte_t pte_mkclean(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	pte_val(pte) &= ~(_PAGE_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static inline pte_t pte_mkold(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	pte_val(pte) &= ~(_PAGE_ACCESSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static inline pte_t pte_mkwrite(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	pte_val(pte) |= _PAGE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static inline pte_t pte_mkread(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	pte_val(pte) |= _PAGE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static inline pte_t pte_mkexec(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	pte_val(pte) |= _PAGE_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static inline pte_t pte_mkdirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	pte_val(pte) |= _PAGE_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static inline pte_t pte_mkyoung(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	pte_val(pte) |= _PAGE_ACCESSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)  * Conversion functions: convert a page and protection to a page entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)  * and a page entry and page directory to the page they refer to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /* What actually goes as arguments to the various functions is less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  * obvious, but a rule of thumb is that struct page's goes as struct page *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  * really physical DRAM addresses are unsigned long's, and DRAM "virtual"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  * addresses (the 0xc0xxxxxx's) goes as void *'s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	/* the PTE needs a physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) #define mk_pte_phys(physpage, pgprot) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) ({                                                                      \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	pte_t __pte;                                                    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	pte_val(__pte) = (physpage) + pgprot_val(pgprot);               \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	__pte;                                                          \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)  * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)  * __pte_page(pte_val) refers to the "virtual" DRAM interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)  * pte_pagenr refers to the page-number counted starting from the virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)  * DRAM start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static inline unsigned long __pte_page(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	/* the PTE contains a physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) #define pte_pagenr(pte)         ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /* permanent address of a page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) #define pte_page(pte)		(mem_map+pte_pagenr(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)  * only the pte's themselves need to point to physical DRAM (see above)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)  * the pagetable links are purely handled within the kernel SW and thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)  * don't need the __pa and __va transformations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) #define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) static inline unsigned long pmd_page_vaddr(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) #define __pmd_offset(address) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) #define pte_pfn(x)		((unsigned long)(((x).pte)) >> PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) #define pfn_pte(pfn, prot)  __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #define pte_ERROR(e) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	       __FILE__, __LINE__, &(e), pte_val(e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) #define pgd_ERROR(e) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	       __FILE__, __LINE__, &(e), pgd_val(e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct vm_area_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) static inline void update_tlb(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	unsigned long address, pte_t *pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) extern void update_cache(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	unsigned long address, pte_t *pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) static inline void update_mmu_cache(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	unsigned long address, pte_t *pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	update_tlb(vma, address, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	update_cache(vma, address, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /* __PHX__ FIXME, SWAP, this probably doesn't work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) #define __swp_type(x)			(((x).val >> 5) & 0x7f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) #define __swp_offset(x)			((x).val >> 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) #define __swp_entry(type, offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) #define kern_addr_valid(addr)           (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) typedef pte_t *pte_addr_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) #endif /* __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) #endif /* __ASM_OPENRISC_PGTABLE_H */