^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/entry-kvm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
/*
 * Handle pending thread-info work flags before transferring to guest mode.
 *
 * Loops until no XFER_TO_GUEST_MODE_WORK bits remain set and no reschedule
 * is pending, so the caller can proceed towards guest entry with all work
 * drained.
 *
 * Returns 0 when all work has been handled, -EINTR on a pending signal,
 * or a negative error code propagated from the architecture handler.
 */
static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
{
	do {
		int ret;

		/*
		 * A pending signal cannot be delivered while in guest mode:
		 * record the signal exit on the vCPU and bail out to user
		 * space with -EINTR so the signal gets handled there.
		 *
		 * NOTE(review): later upstream kernels also test
		 * _TIF_NOTIFY_SIGNAL here — confirm whether this tree
		 * defines that flag and includes it in
		 * XFER_TO_GUEST_MODE_WORK; if so, it must be checked too
		 * or the loop below can spin on it.
		 */
		if (ti_work & _TIF_SIGPENDING) {
			kvm_handle_signal_exit(vcpu);
			return -EINTR;
		}

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		/*
		 * regs == NULL: presumably no user-space pt_regs frame is
		 * relevant on the transfer-to-guest path — confirm against
		 * this tree's tracehook/rseq API contracts.
		 */
		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(NULL);
			rseq_handle_notify_resume(NULL, NULL);
		}

		/* Let the architecture handle its own extra work bits. */
		ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
		if (ret)
			return ret;

		/*
		 * Re-read the flags: handling the work above may have set
		 * new bits (e.g. via an interrupt during schedule()).
		 */
		ti_work = READ_ONCE(current_thread_info()->flags);
	} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) unsigned long ti_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * This is invoked from the outer guest loop with interrupts and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * preemption enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * KVM invokes xfer_to_guest_mode_work_pending() with interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * disabled in the inner loop before going into guest mode. No need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * to disable interrupts here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) ti_work = READ_ONCE(current_thread_info()->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) return xfer_to_guest_mode_work(vcpu, ti_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);