Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
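
/*
 * Illustrative example (not from the original file; the way size is an
 * assumption and depends on the core configuration): with PAGE_SHIFT = 12
 * (4 KiB pages) and a 16 KiB cache way (DCACHE_WAY_SHIFT = 14), the macros
 * above evaluate to
 *
 *	DCACHE_ALIAS_ORDER = 2		two alias bits
 *	DCACHE_N_COLORS    = 4		four page colors
 *	DCACHE_ALIAS_MASK  = 0x3000	address bits 12 and 13
 *
 * so DCACHE_ALIAS(a) yields the color 0..3 of address 'a', and
 * DCACHE_ALIAS_EQ(a, b) is true iff 'a' and 'b' have the same color.
 */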

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
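
/*
 * Illustrative usage (not from the original file): because each of these
 * types is a distinct single-member struct, a pte_t cannot silently be
 * mixed up with a plain unsigned long; conversions go through the
 * accessors, e.g.
 *
 *	pte_t pte = __pte(0x12345007);		wrap a raw PTE value
 *	unsigned long raw = pte_val(pte);	raw == 0x12345007 again
 */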

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if supported by the processor, otherwise fall
 * back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/getorder.h>

#endif
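
/*
 * Illustrative values (not from the original file), assuming PAGE_SHIFT = 12:
 * get_order() returns the smallest order such that (1 << order) pages cover
 * 'size', e.g.
 *
 *	get_order(PAGE_SIZE)     == 0	one page
 *	get_order(2 * PAGE_SIZE) == 1	two pages
 *	get_order(5 * PAGE_SIZE) == 3	rounded up to eight pages
 *
 * 'nsau' counts leading zero bits (returning 32 for an all-zero operand),
 * so 32 - nsau((size - 1) >> PAGE_SHIFT) matches the generic fls()-based
 * implementation.
 */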

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
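
/*
 * Note (not from the original file): with cache aliasing, the kernel
 * mapping used to clear or copy a page must have the same cache color as
 * the user mapping. That is why the *_alias() helpers take the physical
 * addresses and clear_user_highpage()/copy_user_highpage() take the user
 * virtual address 'vaddr': the page(s) can then be temporarily remapped at
 * a color-matching address (the window above VMALLOC_END described in the
 * cache aliasing comment earlier in this file) before the data is touched.
 */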

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

#ifndef CONFIG_XIP_KERNEL
	return off + PHYS_OFFSET;
#else
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;

	return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
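
/*
 * Illustrative example (not from the original file; the addresses are the
 * common MMUv2 defaults and depend on the core configuration): with
 * PAGE_OFFSET = XCHAL_KSEG_CACHED_VADDR = 0xd0000000 and PHYS_OFFSET = 0,
 *
 *	__pa(0xd0001000) == 0x00001000
 *	__va(0x00001000) == (void *)0xd0001000
 *
 * The 'off >= XCHAL_KSEG_SIZE' adjustment in ___pa() additionally maps
 * addresses in the uncached (bypass) KSEG mirror, which sits immediately
 * above the cached KSEG, onto the same physical range.
 */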

#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#endif /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */