// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Hash based on the pmd of addr if configured with MMU, which provides a good
 * hit rate for workloads with spatial locality. Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via
 * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

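/*
 * Store the newly looked-up vma in the cache slot derived from addr,
 * but only if the cache actually belongs to this mm (see
 * vmacache_valid_mm() above).
 */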
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

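/*
 * Check whether this task's cache may be used for @mm. A seqnum
 * mismatch means the mm has since invalidated all per-task caches
 * (e.g. on unmap); resynchronize the seqnum and flush the stale
 * entries in that case.
 */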
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid; initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

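/*
 * Look up the vma containing @addr in this task's cache. The scan
 * starts at the hash slot for @addr and walks all VMACACHE_SIZE
 * entries; NULL is returned on a miss so the caller can fall back
 * to the full vma lookup.
 */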
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
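			/* Sanity check: a cached vma must belong to this mm. */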
#ifdef CONFIG_DEBUG_VM_VMACACHE
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

#ifndef CONFIG_MMU
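/*
 * nommu variant: only a vma whose bounds match [start, end) exactly
 * counts as a hit, since the nommu code looks regions up by their
 * exact range rather than by a contained address.
 */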
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif