Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}
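/* Worked example (illustrative sizes): with 8K base pages
 * (hash_shift == PAGE_SHIFT == 13) and a 512-entry TSB,
 * vaddr 0xfffff80000406000 hashes to
 * (0xfffff80000406000 >> 13) & 511 == entry 3.
 */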

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
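/* A TSB tag holds the virtual address shifted down by 22 bits.  No real
 * 64-bit vaddr can produce a tag with TSB_TAG_INVALID_BIT set, so entries
 * invalidated below can never satisfy tag_compare().
 */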

static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
	unsigned long idx;

	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
		struct tsb *ent = &swapper_tsb[idx];
		unsigned long match = idx << 13;

		match |= (ent->tag << 22);
		if (match >= start && match < end)
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
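/* Each entry's mapped address is reconstructed from its slot (idx << 13
 * supplies the low bits at the 8K page shift) OR'd with the stored tag
 * shifted back up by 22, so the whole kernel TSB can be swept in one
 * pass without consulting the page tables.
 */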

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
		return flush_tsb_kernel_range_scan(start, end);

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
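/* Heuristic: once a range spans at least twice as many pages as the TSB
 * has entries, per-page probing costs more than one linear sweep of the
 * table, so the scan path is taken.  E.g. with a 4096-entry kernel TSB
 * and 8K pages (illustrative sizes), ranges of 64MB or more go through
 * flush_tsb_kernel_range_scan().
 */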

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

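	/* Bit 0 of a batched address carries the "executable mapping" flag
	 * set by the TLB batching code (see tlb_batch_add_one() in tlb.c);
	 * mask it off before hashing.
	 */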
	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
				       unsigned long hash_shift,
				       unsigned long nentries,
				       unsigned int hugepage_shift)
{
	unsigned int hpage_entries;
	unsigned int i;

	hpage_entries = 1 << (hugepage_shift - hash_shift);
	for (i = 0; i < hpage_entries; i++)
		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
				      nentries);
}
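/* A single hugepage shadows several hash-sized TSB slots, so every slot
 * it could occupy is flushed.  E.g. a 64K hugepage (shift 16) hashed at
 * the 8K shift (13) touches 1 << (16 - 13) == 8 consecutive entries.
 */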

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
				 unsigned long tsb, unsigned long nentries,
				 unsigned int hugepage_shift)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
					   nentries, hugepage_shift);
}
#endif

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

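	/* Mappings smaller than a real hugepage (base pages and e.g. 64K
	 * hugepages) are serviced by the base TSB; REAL_HPAGE_SHIFT-sized
	 * and larger mappings live in the separate MM_TSB_HUGE table.
	 */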
	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (tb->hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
					     tb->hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
				     tb->hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
					      nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
						   nentries, hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
					   nentries, hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
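	/* The low three bits of tsb_reg encode the size as 8K << n, which is
	 * also the cache index recovered later in tsb_destroy_one().  page_sz
	 * is the smallest hardware page size (8K/64K/512K/4M) that still
	 * covers the table: e.g. a 128K TSB (8192 << 4) gets tsb_reg 0x4 and
	 * can be mapped with a single 512K TLB entry.
	 */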
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}
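	/* On older chips (pre-cheetah_plus) the TSB is reached through a
	 * fixed virtual window (TSBMAP_*_BASE), so the locked-entry PTE
	 * built above is recorded for the context-switch code to install.
	 */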

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

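	/* _clear_page() is installed as the slab constructor below, so
	 * page-table pages come out of pgtable_cache zeroed without an
	 * explicit memset at allocation time.
	 */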
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}
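/* With the default ratio of -2 the limit is capacity minus a quarter,
 * i.e. 3/4 of the entries: an 8K TSB holds 8192 / 16 == 512 entries
 * (struct tsb is two longs), giving an RSS limit of 384 pages.
 */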

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
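	/* Sizing walk, illustrated with the default 3/4 ratio: for
	 * rss == 2000, an 8K TSB allows 384, 16K allows 768, 32K allows
	 * 1536, and 64K allows 3072 > 2000, so new_size == 64K and
	 * new_cache_index == 3 (the "tsb_64KB" cache).
	 */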

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size,
				     unsigned long page_size_shift);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
			tsb_index == MM_TSB_BASE ?
			PAGE_SHIFT : REAL_HPAGE_SHIFT);
	}
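	/* copy_tsb() is assembler (arch/sparc/kernel/tsb.S); it rehashes
	 * each valid entry of the old table into the new one, skipping
	 * entries it finds locked, as the locking comment above requires.
	 */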

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long saved_hugetlb_pte_count;
	unsigned long saved_thp_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	mm->context.tag_store = NULL;
	spin_lock_init(&mm->context.tag_lock);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset them to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
	saved_thp_pte_count = mm->context.thp_pte_count;
	mm->context.hugetlb_pte_count = 0;
	mm->context.thp_pte_count = 0;

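	/* THP mappings are counted in RSS in base-page units but are
	 * serviced by the huge TSB sized further down, so their
	 * contribution is removed before sizing the base TSB.
	 */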
	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
		tsb_grow(mm, MM_TSB_HUGE,
			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
			 REAL_HPAGE_PER_HPAGE);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
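/* The cache index comes straight from the low three size-encoding bits
 * that setup_tsb_params() stored in tsb_reg_val, so the TSB is returned
 * to the same tsb_caches[] slab it was allocated from.
 */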

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	/* If ADI tag storage was allocated for this task, free it */
	if (mm->context.tag_store) {
		tag_storage_desc_t *tag_desc;
		unsigned long max_desc;
		unsigned char *tags;

		tag_desc = mm->context.tag_store;
		max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
		for (i = 0; i < max_desc; i++) {
			tags = tag_desc->tags;
			tag_desc->tags = NULL;
			kfree(tags);
			tag_desc++;
		}
		kfree(mm->context.tag_store);
		mm->context.tag_store = NULL;
	}
}