Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

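/* Slab cache backing struct kvm_async_pf; one object per pending work item. */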
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

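/*
 * Worker body: fault the page in on behalf of the guest, move the item to
 * the vcpu's "done" list and wake the vcpu. Runs on the system workqueue,
 * in a context unrelated to the vcpu thread.
 */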
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
	int locked = 1;
	bool first;

	might_sleep();

	/*
	 * This work runs asynchronously to the task that owns mm and may
	 * execute in a different context, so the memory has to be accessed
	 * remotely.
	 */
	mmap_read_lock(mm);
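	/*
	 * get_user_pages_remote() may drop and re-take mmap_lock while
	 * faulting the page in; "locked" reports whether it is still held
	 * on return.
	 */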
	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
			&locked);
	if (locked)
		mmap_read_unlock(mm);

	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
		kvm_arch_async_page_present(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
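	/*
	 * A NULL vcpu tells kvm_clear_async_pf_completion_queue() that this
	 * item has already completed and sits on the "done" list.
	 */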
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, cr2_or_gpa);

	rcuwait_wake_up(&vcpu->wait);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

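/*
 * Drop every queued item: flush or cancel the ones whose worker has not
 * finished yet, then free everything left on the "done" list.
 */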
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue items */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * The worker has already finished: the item sits on
		 * vcpu->async_pf.done, so there is nothing to cancel here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
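		/*
		 * With CONFIG_KVM_ASYNC_PF_SYNC the "page present"
		 * notification is injected from the worker itself, so the
		 * work must run to completion; otherwise it can simply be
		 * cancelled, and a successful cancel means the worker never
		 * ran, so its references are dropped here instead.
		 */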
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

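/*
 * Called from vcpu context: hand every completed fault to arch code
 * ("page ready" and, unless the arch already presented it from the
 * worker, "page present"), then release the item.
 */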
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
			kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

/*
 * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
 * success, 'false' on failure (page fault has to be handled synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return false;

	/* Arch specific code should not do async PF in this case */
	if (unlikely(kvm_is_error_hva(hva)))
		return false;

	/*
	 * Allocate without waiting: if the allocation would have to sleep,
	 * we may as well sleep faulting the page in synchronously instead.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return false;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->cr2_or_gpa = cr2_or_gpa;
	work->addr = hva;
	work->arch = *arch;
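	/* Pin mm and kvm for the worker; async_pf_execute() drops both. */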
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	INIT_WORK(&work->work, async_pf_execute);

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

	schedule_work(&work->work);

	return true;
}

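/*
 * Queue a special "wakeup all" item on the "done" list; when the vcpu
 * dequeues it, arch code tells the guest to retry every outstanding
 * async fault.
 */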
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;
	bool first;

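	/* A completed item is already pending and will wake the guest. */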
	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	vcpu->async_pf.queued++;
	return 0;
}