Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) ** Tablewalk MMU emulator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) ** by Toshiyasu Morita
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) ** Started 1/16/98 @ 2:22 am
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/sun3mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/segment.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/oplib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <asm/dvma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
/* Debug switches: MMU-emulation tracing off, PROM mapping dump on. */
#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS

/*
** Defines
*/

#define CONTEXTS_NUM		8	/* hardware MMU contexts (0 = kernel) */
#define SEGMAPS_PER_CONTEXT_NUM 2048
#define PAGES_PER_SEGMENT	16
#define PMEGS_NUM		256	/* page-map entry groups in hardware */
#define PMEG_MASK		0xFF

/*
** Globals
*/

unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

/* Per-PMEG bookkeeping, indexed by PMEG number:
   pmeg_vaddr - virtual address the PMEG currently maps
   pmeg_alloc - 0 = free, 1 = allocated to a user context, 2 = reserved
   pmeg_ctx   - owning context for pmeg_alloc == 1 entries */
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];

/* pointers to the mm structs for each task in each
   context. 0xffffffff is a marker for kernel context */
static struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
    [0] = (struct mm_struct *)0xffffffff
};

/* number of user contexts still unallocated (context 0 is the kernel's;
   bumped in clear_context(), dropped in get_free_context()) */
static unsigned char ctx_avail = CONTEXTS_NUM-1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram..  I really
   hope it never wants more than that. */
unsigned long rom_pages[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) /* Print a PTE value in symbolic form. For debugging. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) void print_pte (pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	/* Verbose version. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	unsigned long val = pte_val (pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	pr_cont(" pte=%lx [addr=%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	if (val & SUN3_PAGE_VALID)	pr_cont(" valid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	if (val & SUN3_PAGE_WRITEABLE)	pr_cont(" write");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	if (val & SUN3_PAGE_SYSTEM)	pr_cont(" sys");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	if (val & SUN3_PAGE_NOCACHE)	pr_cont(" nocache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	if (val & SUN3_PAGE_ACCESSED)	pr_cont(" accessed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	if (val & SUN3_PAGE_MODIFIED)	pr_cont(" modified");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	switch (val & SUN3_PAGE_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		case SUN3_PAGE_TYPE_MEMORY: pr_cont(" memory"); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		case SUN3_PAGE_TYPE_IO:     pr_cont(" io");     break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		case SUN3_PAGE_TYPE_VME16:  pr_cont(" vme16");  break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		case SUN3_PAGE_TYPE_VME32:  pr_cont(" vme32");  break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	pr_cont("]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	/* Terse version. More likely to fit on a line. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	unsigned long val = pte_val (pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	char flags[7], *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	flags[6] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	switch (val & SUN3_PAGE_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		case SUN3_PAGE_TYPE_IO:     type = "io"    ; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		case SUN3_PAGE_TYPE_VME16:  type = "vme16" ; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		case SUN3_PAGE_TYPE_VME32:  type = "vme32" ; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		default: type = "unknown?"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	pr_cont(" pte=%08lx [%07lx %s %s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
/* Print the PTE value for a given virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	unsigned long segmap = sun3_get_segmap(vaddr);

	pr_cont(" vaddr=%lx [%02lx]", vaddr, segmap);
	print_pte(__pte(sun3_get_pte(vaddr)));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
/*
 * Initialise the MMU emulator.
 *
 * @bootmem_end: first address past the bootmem region.
 *
 * Resets the PMEG bookkeeping tables, marks the PMEGs already used by the
 * kernel/bootmem and by the PROM as permanently reserved (pmeg_alloc == 2),
 * invalidates every other kernel-space mapping, records the lowest
 * PROM-mapped segment as m68k_vmalloc_end, initialises DVMA, and finally
 * replicates the kernel segment mappings into all other hardware contexts
 * through the PROM's pv_setctxt vector.
 */
void __init mmu_emu_init(unsigned long bootmem_end)
{
	unsigned long seg, num;
	int i,j;

	/* Start from a clean slate: no PMEGs tracked, no ROM pages noted. */
	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

	/* pmeg align the end of bootmem, adding another pmeg,
	 * later bootmem allocations will likely need it */
	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;

	/* mark all of the pmegs used thus far as reserved */
	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
		pmeg_alloc[i] = 2;


	/* I'm thinking that most of the top pmeg's are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	/* liberate all existing mappings in the rest of kernel space */
	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);

		/* only drop segments whose PMEG we didn't reserve above */
		if(!pmeg_alloc[i]) {
#ifdef DEBUG_MMU_EMU
			pr_info("freed:");
			print_pte_vaddr (seg);
#endif
			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
		}
	}

	/* Walk the PROM area (0x0f800000..0x10000000) one segment
	   (16 pages) at a time, keeping whatever is already mapped. */
	j = 0;
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_PROM_MAPS
			/* note: the break makes this print only the first
			   page of each mapped segment */
			for(i = 0; i < 16; i++) {
				pr_info("mapped:");
				print_pte_vaddr (seg + (i*PAGE_SIZE));
				break;
			}
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if (!m68k_vmalloc_end)
				m68k_vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using...  does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;
		}
	}

	dvma_init();


	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);

	/* Copy context 0's segment mappings into contexts 1..7 via the
	   PROM; MAKE_MM_SEG(3) presumably selects the address space the
	   PROM call expects -- TODO confirm against the PROM interface. */
	set_fs(MAKE_MM_SEG(3));
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fs(KERNEL_DS);

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
/* erase the mappings for a dead context.  Uses the pg_dir for hints
   as the pmeg tables proved somewhat unreliable, and unmapping all of
   TASK_SIZE was much slower and no more stable. */
/* todo: find a better way to keep track of the pmegs used by a
   context for when they're cleared */
void clear_context(unsigned long context)
{
     unsigned char oldctx;
     unsigned long i;

     /* context 0 is the kernel's: it has no ctx_alloc[] slot or mm to
	release, but its stale user PMEGs are still swept below */
     if(context) {
	     if(!ctx_alloc[context])
		     panic("clear_context: context not allocated\n");

	     /* detach the mm from this hardware context, free the slot */
	     ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
	     ctx_alloc[context] = (struct mm_struct *)0;
	     ctx_avail++;
     }

     /* switch the MMU to the dying context so the segmap invalidations
	below land in that context's mappings; restored at the end */
     oldctx = sun3_get_context();

     sun3_put_context(context);

     /* invalidate and recycle every PMEG bookkept as owned by this
	context (only user allocations, pmeg_alloc == 1) */
     for(i = 0; i < SUN3_INVALID_PMEG; i++) {
	     if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
		     sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
		     pmeg_ctx[i] = 0;
		     pmeg_alloc[i] = 0;
		     pmeg_vaddr[i] = 0;
	     }
     }

     sun3_put_context(oldctx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /* gets an empty context.  if full, kills the next context listed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)    die first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /* This context invalidation scheme is, well, totally arbitrary, I'm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)    sure it could be much more intelligent...  but it gets the job done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)    for now without much overhead in making it's decision. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) /* todo: come up with optimized scheme for flushing contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) unsigned long get_free_context(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	unsigned long new = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	static unsigned char next_to_die = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if(!ctx_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		/* kill someone to get our context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		new = next_to_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		clear_context(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		next_to_die = (next_to_die + 1) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 		if(!next_to_die)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 			next_to_die++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		while(new < CONTEXTS_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 			if(ctx_alloc[new])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 				new++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		// check to make sure one was really free...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		if(new == CONTEXTS_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 			panic("get_free_context: failed to find free context");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	ctx_alloc[new] = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	ctx_avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
/*
 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
 * `context'. Maintain internal PMEG management structures. This doesn't
 * actually map the physical address, but does clear the old mappings.
 */
//todo: better allocation scheme? but is extra complexity worthwhile?
//todo: only clear old entries if necessary? how to tell?

inline void mmu_emu_map_pmeg (int context, int vaddr)
{
	/* rotating cursor; unsigned char wraps at 256 (== PMEGS_NUM), so
	   the spare-search below cycles past the reserved entries */
	static unsigned char curr_pmeg = 128;
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one. */
	/* skips only permanently-reserved (2) entries; a user-allocated
	   (1) PMEG may be stolen and is invalidated just below */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;


#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
		curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any */
	/* must be done in the previous owner's context, then switch back */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmeg's away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		/* kernel PMEGs become permanently reserved (2) */
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;

	}
	else {
		/* user PMEG: mapped in this context only */
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);

	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
/*
 * Handle a pagefault at virtual address `vaddr'; check if there should be a
 * page there (specifically, whether the software pagetables indicate that
 * there is). This is necessary due to the limited size of the second-level
 * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
 * mapping present, we select a `spare' PMEG and use it to create a mapping.
 * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
 * if we successfully handled the fault.
 */
//todo: should we bump minor pagefault counter? if so, here or in caller?
//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?

// kernel_fault is set when a kernel page couldn't be demand mapped,
// and forces another try using the kernel page table.  basically a
// hack so that vmalloc would work correctly.

int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;	/* page table root to walk (kernel's or the task's) */

	/* no mm (e.g. kernel thread): walk the kernel tables, context 0 */
	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
		vaddr, read_flag ? "read" : "write", crp);
#endif

	/* split vaddr into segment index (11 bits) and page-in-segment
	   index (4 bits) for the two-level lookup */
	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;

#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment,
		offset);
#endif

	/* pgd entry holds the (physical) address of the pte group --
	   __va'd below after indexing; TODO confirm the phys/virt detail */
	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//                pr_info("mmu_emu_handle_fault: invalid pmd\n");
                return 0;
        }

	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary. If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page. --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	pr_cont("\n");
#endif

	return 1;
}