Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
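
/*
 * Illustrative usage sketch, not part of the original header: the
 * counters above feed the DirectMap lines that arch_report_meminfo()
 * emits for /proc/meminfo. A mapping function that has just installed
 * one 1 MB segment mapping in the kernel direct map would account for
 * it with
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *
 * and would pass a negative count when such a mapping is split or
 * removed again.
 */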

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
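
/*
 * Illustrative note, not part of the original header:
 * __HAVE_COLOR_ZERO_PAGE means s390 keeps several physically distinct
 * zero pages starting at empty_zero_page. The macro keeps only the
 * page-aligned vaddr bits selected by zero_page_mask, so neighbouring
 * virtual pages resolve to different zero pages; e.g. assuming
 * zero_page_mask == 0x3000 (four colours):
 *
 *	ZERO_PAGE(0x0000)	-> empty_zero_page + 0x0000
 *	ZERO_PAGE(0x1000)	-> empty_zero_page + 0x1000
 *	ZERO_PAGE(0x4000)	-> empty_zero_page + 0x0000	(wraps)
 *
 * which spreads read traffic from zero mappings across the cache.
 */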

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
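
/*
 * Illustrative sketch, not part of the original header: extracting the
 * storage key fields laid out above from the 8-bit key value. The
 * helper names are hypothetical, chosen only for this example.
 */
static inline unsigned int skey_demo_acc(unsigned char skey)
{
	return skey >> 4;		/* ACC, bits 0-3 */
}

static inline unsigned int skey_demo_fetch_prot(unsigned char skey)
{
	return (skey >> 3) & 1;		/* F, bit 4 */
}

static inline unsigned int skey_demo_referenced(unsigned char skey)
{
	return (skey >> 2) & 1;		/* R, bit 5 */
}

static inline unsigned int skey_demo_changed(unsigned char skey)
{
	return (skey >> 1) & 1;		/* C, bit 6 */
}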

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
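
/*
 * Illustrative sketch, not part of the original header: the three
 * predicates quoted above follow directly from the bit definitions,
 * since _PAGE_INVALID == 0x400, _PAGE_PROTECT == 0x200 and
 * _PAGE_PRESENT == 0x001. The function names are hypothetical; the
 * real pte_none()/pte_present() are defined further down in this file.
 */
static inline int pte_demo_none(pte_t pte)
{
	return pte_val(pte) == _PAGE_INVALID;		/* pte == 0x400 */
}

static inline int pte_demo_swap(pte_t pte)
{
	/* protect bit set, present bit clear: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) ==
	       _PAGE_PROTECT;
}

static inline int pte_demo_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) != 0;	/* pte & 0x001 */
}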

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
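
/*
 * Illustrative sketch, not part of the original header: how a 64-bit
 * virtual address decomposes under the macros above. Each region or
 * segment level indexes one of _CRST_ENTRIES == 2048 entries (11 bits),
 * the page table one of _PAGE_ENTRIES == 256 entries (8 bits), and the
 * byte offset takes 12 bits: 11 + 11 + 11 + 11 + 8 + 12 = 64. The
 * helper names are hypothetical, chosen only for this example.
 */
static inline unsigned long addr_demo_segment_index(unsigned long addr)
{
	return (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;	/* 0..2047 */
}

static inline unsigned long addr_demo_page_index(unsigned long addr)
{
	return (addr & _PAGE_INDEX) >> _PAGE_SHIFT;		/* 0..255 */
}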

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
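
/*
 * Illustrative note, not part of the original header: the encoding
 * above is how young and dirty are tracked in software for large
 * mappings, since the hardware referenced/changed state lives in the
 * storage key rather than in the table entry. An "old" entry keeps the
 * invalid bit set, so the next access faults and the handler can set
 * the young bit; a "clean" writable entry keeps the protect bit set,
 * so the first store faults and the handler can set the dirty bit.
 * Compare "read-write, clean, young" (01..1...0...11: protect set,
 * invalid clear) with "read-write, dirty, young" (11..0...0...11:
 * both hardware bits clear, no further faults needed).
 */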

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)
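
/*
 * Illustrative note, not part of the original header: the user
 * protections above create every pte "old and clean" per the pte table
 * earlier in this file. PAGE_RW, for instance, is exactly the
 * "read-write, clean, old" pattern .11.xx1100.1 (invalid + protect +
 * write + read + present), so the first access faults once to set the
 * software young bit and the first store faults once to set the dirty
 * bit. The kernel variants (PAGE_KERNEL etc.) are created young and
 * dirty so they never take such faults.
 */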

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
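
/*
 * Illustrative note, not part of the original header: s390 grows the
 * page table hierarchy on demand, and mm->context.asce_limit records
 * how much address space the current hardware tables cover. The
 * helpers above therefore report, per mm, which Linux levels are
 * folded away:
 *
 *	asce_limit <= _REGION3_SIZE (2 GiB): 2 hardware levels,
 *		p4d, pud and pmd all fold into the pgd
 *	asce_limit <= _REGION2_SIZE (4 TiB): 3 hardware levels,
 *		p4d and pud fold
 *	asce_limit <= _REGION1_SIZE (8 PiB): 4 hardware levels,
 *		only the p4d folds
 *
 * A task starts with a small limit and upgrades to more table levels
 * only when it maps addresses beyond the current limit.
 */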

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

/* COMPARE AND SWAP AND PURGE (CSP) on a 32-bit table entry */
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

/* CSPG, the 64-bit variant, emitted via .insn (opcode 0xb98a) */
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

/* COMPARE AND REPLACE DAT TABLE ENTRY (CRDTE), via .insn (opcode 0xb98f) */
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
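
/*
 * Illustrative usage sketch, not part of the original header: these
 * wrappers update DAT table entries atomically while purging stale TLB
 * copies. A hypothetical caller replacing a segment table entry,
 * assuming old and new hold the entry values, "entry" points at the
 * segment table entry and "asce" is the address space control element
 * in use, might look like:
 *
 *	crdte(old, new, (unsigned long)entry, CRDTE_DTT_SEGMENT,
 *	      addr, asce);
 *
 * The replacement happens only if the entry still contains "old",
 * compare-and-swap style.
 */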

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	return pte_val(pte) == _PAGE_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static inline int pte_swap(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	/* Bit pattern: (pte & 0x201) == 0x200 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		== _PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static inline int pte_special(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	return (pte_val(pte) & _PAGE_SPECIAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) #define __HAVE_ARCH_PTE_SAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static inline int pte_same(pte_t a, pte_t b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	return pte_val(a) == pte_val(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) #ifdef CONFIG_NUMA_BALANCING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static inline int pte_protnone(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static inline int pmd_protnone(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	/* pmd_large(pmd) implies pmd_present(pmd) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) static inline int pte_soft_dirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) #define pte_swp_soft_dirty pte_soft_dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static inline pte_t pte_mksoft_dirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	pte_val(pte) |= _PAGE_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) #define pte_swp_mksoft_dirty pte_mksoft_dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) static inline pte_t pte_clear_soft_dirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static inline int pmd_soft_dirty(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * The query functions pte_write/pte_dirty/pte_young only work if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * pte_present() is true. Undefined behaviour if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static inline int pte_write(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	return (pte_val(pte) & _PAGE_WRITE) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) static inline int pte_dirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static inline int pte_young(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) }
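
/*
 * Illustrative sketch (editor's addition): per the comment above, callers
 * are expected to guard these query functions with pte_present(), e.g.
 *
 *	if (pte_present(pte) && pte_write(pte))
 *		handle_writable_pte(pte);
 *
 * where handle_writable_pte() is a made-up name for illustration.
 */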
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) #define __HAVE_ARCH_PTE_UNUSED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) static inline int pte_unused(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	return pte_val(pte) & _PAGE_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * Extract the pgprot value from the given pte while at the same time making it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * usable for kernel address space mappings where fault driven dirty and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * must not be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) static inline pgprot_t pte_pgprot(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (pte_write(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		pte_flags |= pgprot_val(PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return __pgprot(pte_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
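
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a caller that wants to replicate the protection of an existing kernel
 * mapping could extract it with pte_pgprot() and feed it back into
 * mk_pte_phys() (defined further below). The function name and parameters
 * are made up for illustration, hence the #if 0 guard.
 */
#if 0
static inline pte_t clone_kernel_pte(pte_t old, unsigned long new_phys)
{
	/* keep access and caching attributes, point at a new physical page */
	return mk_pte_phys(new_phys, pte_pgprot(old));
}
#endif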
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * pgd/pmd/pte modification functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static inline void pgd_clear(pgd_t *pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) static inline void p4d_clear(p4d_t *p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static inline void pud_clear(pud_t *pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static inline void pmd_clear(pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	pte_val(*ptep) = _PAGE_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * The following pte modification functions only work if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * pte_present() is true. Undefined behaviour if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	pte_val(pte) &= _PAGE_CHG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	pte_val(pte) |= pgprot_val(newprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 * has the invalid bit set, clear it again for readable, young pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		pte_val(pte) &= ~_PAGE_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 * protection bit set, clear it again for writable, dirty pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		pte_val(pte) &= ~_PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) }
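
/*
 * Illustrative sketch (editor's addition): the typical caller is generic
 * mm code changing protections, roughly in the style of change_pte_range():
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 *
 * pte_modify() keeps pfn, dirty, young and soft-dirty state from the old
 * pte, takes everything else from newprot, and then re-derives
 * _PAGE_INVALID and _PAGE_PROTECT as described in the comments above.
 */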
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) static inline pte_t pte_wrprotect(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	pte_val(pte) &= ~_PAGE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	pte_val(pte) |= _PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) static inline pte_t pte_mkwrite(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pte_val(pte) |= _PAGE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if (pte_val(pte) & _PAGE_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		pte_val(pte) &= ~_PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static inline pte_t pte_mkclean(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	pte_val(pte) &= ~_PAGE_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	pte_val(pte) |= _PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static inline pte_t pte_mkdirty(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (pte_val(pte) & _PAGE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		pte_val(pte) &= ~_PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static inline pte_t pte_mkold(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	pte_val(pte) &= ~_PAGE_YOUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	pte_val(pte) |= _PAGE_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) static inline pte_t pte_mkyoung(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	pte_val(pte) |= _PAGE_YOUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (pte_val(pte) & _PAGE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		pte_val(pte) &= ~_PAGE_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static inline pte_t pte_mkspecial(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	pte_val(pte) |= _PAGE_SPECIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) #ifdef CONFIG_HUGETLB_PAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) static inline pte_t pte_mkhuge(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	pte_val(pte) |= _PAGE_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #define IPTE_GLOBAL	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #define IPTE_LOCAL	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #define IPTE_NODAT	0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #define IPTE_GUEST_ASCE	0x800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 					unsigned long opt, unsigned long asce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 					int local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	unsigned long pto = (unsigned long) ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	if (__builtin_constant_p(opt) && opt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		/* Invalidation + TLB flush for the pte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			  [m4] "i" (local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	/* Invalidate ptes with options + TLB flush of the ptes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	opt = opt | (asce & _ASCE_ORIGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		: [r2] "+a" (address), [r3] "+a" (opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 					      pte_t *ptep, int local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	unsigned long pto = (unsigned long) ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/* Invalidate a range of ptes + TLB flush of the ptes */
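	/*
	 * Editor's note (inferred from the loop below): nr is the number of
	 * additional pages to invalidate. The instruction may process only
	 * part of the range, advancing the address and decrementing nr in
	 * place; once the last page is done nr wraps around to 255, which
	 * is the termination condition.
	 */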
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			: [r2] "+a" (address), [r3] "+a" (nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	} while (nr != 255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * both clear the TLB for the unmapped pte. The reason is that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * to modify an active pte. The sequence is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *   1) ptep_get_and_clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  *   2) set_pte_at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  *   3) flush_tlb_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * On s390 the TLB needs to be flushed together with the modification of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * the pte if the pte is active. The only way this can be implemented is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  * to have ptep_get_and_clear do the TLB flush; in exchange, flush_tlb_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * is a nop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
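
/*
 * Illustrative sketch (editor's addition) of the sequence described above
 * as it looks from common code:
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	(IPTE: pte + TLB)
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	...
 *	flush_tlb_range(vma, start, end);		(nop on s390)
 *
 * The architectural requirement that the TLB be flushed together with the
 * pte update is thus satisfied inside ptep_get_and_clear() itself.
 */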
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 					    unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	pte_t pte = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return pte_young(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 					 unsigned long address, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return ptep_test_and_clear_young(vma, address, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 				       unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	pte_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (mm_is_protected(mm) && pte_present(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			     pte_t *, pte_t, pte_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				     unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	pte_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * cannot be accessed while the batched unmap is running. In this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * full==1 and a simple pte_clear is enough. See tlb.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 					    unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 					    pte_t *ptep, int full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	pte_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		res = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		*ptep = __pte(_PAGE_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (mm_is_protected(mm) && pte_present(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
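
/*
 * Illustrative sketch (editor's addition): the batched unmap path passes
 * the "full mm" state collected by tlb_gather_mmu(), roughly:
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *
 * With full=1 this degenerates to a plain store of _PAGE_INVALID; no IPTE
 * is issued because the mm can no longer be accessed concurrently.
 */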
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) #define __HAVE_ARCH_PTEP_SET_WRPROTECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static inline void ptep_set_wrprotect(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				      unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	pte_t pte = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (pte_write(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
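
/*
 * Illustrative sketch (editor's addition): fork() uses this helper to set
 * up copy-on-write, roughly:
 *
 *	if (is_cow_mapping(vm_flags) && pte_write(pte))
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *
 * The pte_write() test inside the helper means already read-only ptes are
 * left untouched and no IPTE is issued for them.
 */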
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static inline int ptep_set_access_flags(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 					unsigned long addr, pte_t *ptep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 					pte_t entry, int dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (pte_same(*ptep, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * Additional functions to handle KVM guest page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		     pte_t *ptep, pte_t entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) void ptep_notify(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		 pte_t *ptep, unsigned long bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		    pte_t *ptep, int prot, unsigned long bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		     pte_t *ptep, int reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		    pte_t *sptep, pte_t *tptep, pte_t pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			    pte_t *ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			  unsigned char key, bool nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			       unsigned char key, unsigned char *oldkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			       bool nq, bool mr, bool mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			  unsigned char *key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				unsigned long bits, unsigned long value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			unsigned long *oldpte, unsigned long *oldpgste);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #define pgprot_writecombine	pgprot_writecombine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) pgprot_t pgprot_writecombine(pgprot_t prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) #define pgprot_writethrough	pgprot_writethrough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) pgprot_t pgprot_writethrough(pgprot_t prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * Certain architectures need to do special things when PTEs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  * within a page table are directly modified.  Thus, the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * hook is made available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			      pte_t *ptep, pte_t entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (pte_present(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		pte_val(entry) &= ~_PAGE_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (mm_has_pgste(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		ptep_set_pte_at(mm, addr, ptep, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		*ptep = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
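
/*
 * Illustrative sketch (editor's addition): a fault handler installs a new
 * mapping by combining set_pte_at() with the conversion helpers below:
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	if (write_fault)
 *		entry = pte_mkwrite(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *
 * For an mm with PGSTEs (KVM guests) this routes through ptep_set_pte_at();
 * write_fault is a made-up name for illustration.
 */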
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * Conversion functions: convert a page and protection to a page entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * and a page entry and page directory to the page they refer to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	pte_t __pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	pte_val(__pte) = physpage | pgprot_val(pgprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (!MACHINE_HAS_NX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		pte_val(__pte) &= ~_PAGE_NOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return pte_mkyoung(__pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	unsigned long physpage = page_to_phys(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	pte_t __pte = mk_pte_phys(physpage, pgprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (pte_write(__pte) && PageDirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		__pte = pte_mkdirty(__pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	return __pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) #define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static inline unsigned long pmd_deref(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	unsigned long origin_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (pmd_large(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return pmd_val(pmd) & origin_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static inline unsigned long pmd_pfn(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	return pmd_deref(pmd) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static inline unsigned long pud_deref(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	unsigned long origin_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	origin_mask = _REGION_ENTRY_ORIGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (pud_large(pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	return pud_val(pud) & origin_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static inline unsigned long pud_pfn(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	return pud_deref(pud) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)  * The pgd_offset function *always* adds the index for the top-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  * region/segment table. This is done to get a sequence like the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * following to work:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  *	pgdp = pgd_offset(current->mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  *	pgd = READ_ONCE(*pgdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  *	p4dp = p4d_offset(&pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  *	...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * The subsequent p4d_offset, pud_offset and pmd_offset functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  * only add an index if they dereferenced the pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	unsigned long rste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	unsigned int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	/* Get the first entry of the top level table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	rste = pgd_val(*pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	/* Pick up the shift from the table type of the first entry */
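	/*
	 * Editor's note: _REGION_ENTRY_TYPE_MASK covers bits 0x0c, so the
	 * value after ">> 2" is 3 for a region-1, 2 for a region-2, 1 for a
	 * region-3 and 0 for a segment table, yielding shifts of 53, 42, 31
	 * and 20, i.e. PGDIR_SHIFT, P4D_SHIFT, PUD_SHIFT and PMD_SHIFT.
	 */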
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	return (p4d_t *) pgdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) #define p4d_offset_lockless p4d_offset_lockless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	return p4d_offset_lockless(pgdp, *pgdp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	return (pud_t *) p4dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #define pud_offset_lockless pud_offset_lockless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	return pud_offset_lockless(p4dp, *p4dp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) #define pud_offset pud_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	return (pmd_t *) pudp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) #define pmd_offset_lockless pmd_offset_lockless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	return pmd_offset_lockless(pudp, *pudp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) #define pmd_offset pmd_offset
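
/*
 * Illustrative sketch (editor's addition): a complete lockless downward
 * walk using the helpers above, following the pattern described before
 * pgd_offset_raw(). All names are local to the example, hence the
 * #if 0 guard.
 */
#if 0
static inline pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp, pgd;
	p4d_t *p4dp, p4d;
	pud_t *pudp, pud;

	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	p4d = READ_ONCE(*p4dp);
	pudp = pud_offset_lockless(p4dp, p4d, addr);
	pud = READ_ONCE(*pudp);
	/* for a folded level the *_offset_lockless helpers return the
	 * same slot instead of descending */
	return pmd_offset_lockless(pudp, pud, addr);
}
#endif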
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static inline unsigned long pmd_page_vaddr(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	return (unsigned long) pmd_deref(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	return end <= current->mm->context.asce_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) #define gup_fast_permitted gup_fast_permitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) #define pfn_pte(pfn, pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) #define pte_page(x) pfn_to_page(pte_pfn(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) #define pud_page(pud) pfn_to_page(pud_pfn(pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static inline pmd_t pmd_wrprotect(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static inline pmd_t pmd_mkwrite(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static inline pmd_t pmd_mkclean(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) static inline pmd_t pmd_mkdirty(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static inline pud_t pud_wrprotect(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	return pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static inline pud_t pud_mkwrite(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	return pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static inline pud_t pud_mkclean(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	return pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static inline pud_t pud_mkdirty(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		return pgprot_val(SEGMENT_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		return pgprot_val(SEGMENT_RO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		return pgprot_val(SEGMENT_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		return pgprot_val(SEGMENT_RW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	return pgprot_val(SEGMENT_RWX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static inline pmd_t pmd_mkyoung(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static inline pmd_t pmd_mkold(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
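
/*
 * Illustrative sketch (editor's addition): change_huge_pmd() style usage,
 * mirroring what pte_modify() does for normal ptes:
 *
 *	entry = pmdp_invalidate(vma, addr, pmdp);
 *	entry = pmd_modify(entry, newprot);
 *	set_pmd_at(mm, addr, pmdp, entry);
 */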
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	pmd_t __pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	return __pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static inline void __pmdp_csp(pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) #define IDTE_GLOBAL	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) #define IDTE_LOCAL	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) #define IDTE_PTOA	0x0800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) #define IDTE_NODAT	0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) #define IDTE_GUEST_ASCE	0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
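/*
 * __pmdp_idte emits IDTE (INVALIDATE DAT TABLE ENTRY, opcode 0xb98e) for
 * the segment-table entry that maps addr, purging the TLB entries formed
 * from it.  The m4 field selects local (this CPU only) or global clearing;
 * opt and asce optionally restrict the purge to a guest ASCE.
 */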
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 					unsigned long opt, unsigned long asce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 					int local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	unsigned long sto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (__builtin_constant_p(opt) && opt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		/* flush without guest asce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			: "+m" (*pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			  [m4] "i" (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			: "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		/* flush with guest asce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			: "+m" (*pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			  [r3] "a" (asce), [m4] "i" (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			: "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
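/*
 * Illustration only: arch/s390/mm/pgtable.c drives these primitives
 * roughly as sketched below, preferring IDTE and falling back to CSP.
 * The helper name is hypothetical, not part of this header.
 */
static inline void example_pmdp_idte_global(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		/* purge restricted to the guest ASCE, no-DAT variant */
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		/* plain global IDTE without guest asce */
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
	else
		/* no IDTE facility: compare-and-swap-and-purge fallback */
		__pmdp_csp(pmdp);
}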
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 					unsigned long opt, unsigned long asce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 					int local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	unsigned long r3o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	r3o |= _ASCE_TYPE_REGION3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	if (__builtin_constant_p(opt) && opt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		/* flush without guest asce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			: "+m" (*pudp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			  [m4] "i" (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			: "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		/* flush with guest asce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			: "+m" (*pudp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			  [r3] "a" (asce), [m4] "i" (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			: "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
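/*
 * Exchange a pmd/pud entry with a new value (implemented in
 * arch/s390/mm/pgtable.c): the _direct variant flushes the TLB
 * immediately, the _lazy variant may defer the flush while the mm is
 * not attached to other CPUs.
 */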
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
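/*
 * Deposit/withdraw let the generic THP code stash a pre-allocated
 * page-table page when a huge pmd is installed and take it back when
 * the pmd is split, so the split path never has to allocate memory.
 */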
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) #define __HAVE_ARCH_PGTABLE_DEPOSIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 				pgtable_t pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) #define __HAVE_ARCH_PGTABLE_WITHDRAW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 					unsigned long addr, pmd_t *pmdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 					pmd_t entry, int dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	VM_BUG_ON(addr & ~HPAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	entry = pmd_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		entry = pmd_mkdirty(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (pmd_val(*pmdp) == pmd_val(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 					    unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	pmd_t pmd = *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	return pmd_young(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 					 unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	VM_BUG_ON(addr & ~HPAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	return pmdp_test_and_clear_young(vma, addr, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			      pmd_t *pmdp, pmd_t entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (!MACHINE_HAS_NX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	*pmdp = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static inline pmd_t pmd_mkhuge(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 					    unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
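/*
 * With full == 1 the whole address space is being torn down, so the
 * entry can simply be overwritten without a TLB flush; otherwise use
 * the lazily flushed exchange.
 */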
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 						 unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 						 pmd_t *pmdp, int full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		pmd_t pmd = *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 					  unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) #define __HAVE_ARCH_PMDP_INVALIDATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 				   unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) #define __HAVE_ARCH_PMDP_SET_WRPROTECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static inline void pmdp_set_wrprotect(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				      unsigned long addr, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	pmd_t pmd = *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (pmd_write(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 					unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 					pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #define pmdp_collapse_flush pmdp_collapse_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static inline int pmd_trans_huge(pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
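/* THP requires EDAT1, the facility providing 1 MB segment (pmd) pages. */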
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) #define has_transparent_hugepage has_transparent_hugepage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static inline int has_transparent_hugepage(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	return MACHINE_HAS_EDAT1 ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)  * 64-bit swap entry format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)  * A page-table entry has some bits we have to treat in a special way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)  * Bits 52 and 55 have to be zero, otherwise a specification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)  * exception will occur instead of a page translation exception. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)  * specification exception has the bad habit of not storing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)  * necessary information in the lowcore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)  * Bits 54 and 63 are used to indicate the page type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)  * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)  * This leaves bits 0-51 and bits 56-62 to store type and offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)  * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)  * for the offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * |			  offset			|01100|type |00|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) #define __SWP_OFFSET_MASK	((1UL << 52) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) #define __SWP_OFFSET_SHIFT	12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) #define __SWP_TYPE_MASK		((1UL << 5) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) #define __SWP_TYPE_SHIFT	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static inline unsigned long __swp_type(swp_entry_t entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static inline unsigned long __swp_offset(swp_entry_t entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
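/*
 * Worked example of the encoding above, assuming _PAGE_INVALID is 0x400
 * and _PAGE_PROTECT is 0x200 (their values earlier in this header); the
 * type and offset are made-up illustration values:
 *
 *	mk_swap_pte(3, 0x1234)
 *	  = 0x600 | (0x1234 << 12) | (3 << 2)    = 0x000000000123460c
 *	__swp_type():   (0x123460c >> 2) & 0x1f  = 3
 *	__swp_offset(): (0x123460c >> 12)        = 0x1234
 *
 * Note (0x123460c & 0x201) == 0x200, the swap pte pattern described above.
 */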
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) #define kern_addr_valid(addr)   (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) extern int vmem_add_mapping(unsigned long start, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) extern void vmem_remove_mapping(unsigned long start, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) extern int s390_enable_sie(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) extern int s390_enable_skey(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) extern void s390_reset_cmma(struct mm_struct *mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* s390 has a private copy of get unmapped area to deal with cache synonyms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) #define HAVE_ARCH_UNMAPPED_AREA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) #endif /* _ASM_S390_PGTABLE_H */