// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

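/*
 * True when the address lies in the kernel vmalloc/KMAP window, i.e. it
 * is mapped by init_mm rather than by a user process's mm.
 */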
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

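/*
 * ASID (context) allocator state, modelled on the old ppc code: a bitmap
 * of contexts in use, a round-robin hint for the next context to hand
 * out, a count of free contexts, and a reverse map from context number
 * to the mm_struct that owns it.
 */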
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

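	/* Allocate the global zero page; memblock_alloc() returns zeroed memory. */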
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

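	/*
	 * Allocate enough page tables to map all of RAM: one pte per
	 * page, rounded up to whole pages.
	 */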
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

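	/*
	 * Build the kernel's linear mapping: populate one pte table at a
	 * time, covering each page from PAGE_OFFSET up to high_memory.
	 */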
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now fill the pte table for this kernel virtual range */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}

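/*
 * Handle an ITLB/DTLB miss: walk the page tables for the faulting
 * address and, if a valid pte is found, load a TLB entry by hand.
 * Returns 0 when a TLB entry was written, -1 to make the caller take
 * the full page-fault path.
 */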
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

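	/*
	 * For a DTLB miss the MMU has latched the fault address in MMUAR;
	 * for an ITLB miss reconstruct it from the PC and the extension word.
	 */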
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

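	/* Walk the page-table levels; a missing level means a real fault. */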
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d)) {
		local_irq_restore(flags);
		return -1;
	}

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

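	/*
	 * Mark the page accessed. A clean user page is left write-protected
	 * so that the first write re-faults here and sets the dirty bit.
	 */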
	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

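	/*
	 * Hand-load the TLB entry: virtual page and ASID into MMUTR,
	 * physical page and protection bits into MMUDR, then commit the
	 * entry through MMUOR.
	 */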
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

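/*
 * Register the single contiguous RAM bank with memblock and reserve the
 * memory already occupied by the kernel image (_rambase up to _ramstart).
 */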
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 * -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}