// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

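/*
 * Write back all CPU caches and issue DF_FLUSH, so that guest data still
 * sitting in the cache or data fabric is flushed before any reclaimed
 * ASIDs are handed out again.
 */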
static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap,
			    max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid)
		return false;

	if (sev_flush_asids())
		return false;

	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

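/*
 * Allocate a fresh SEV ASID from the bitmap.  If none are free, try once
 * to recycle ASIDs that were freed but not yet flushed, then give up.
 * Returns the 1-based ASID on success, -EBUSY if the pool is exhausted.
 */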
static int sev_asid_new(void)
{
	bool retry = true;
	int pos;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 */
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid) {
		if (retry && __sev_recycle_asids()) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

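/*
 * Mark the ASID for reclaim instead of returning it to the free pool: it
 * must not be reused until a DF_FLUSH has been done.  Also drop any
 * per-CPU VMCB pointers that were cached for this ASID.
 */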
static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

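/* Ask the PSP firmware to destroy the guest context for @handle. */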
static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission *decommission;

	if (!handle)
		return;

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

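/*
 * DEACTIVATE the firmware handle, unbinding it from its ASID, and then
 * decommission the guest context.
 */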
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	sev_decommission(handle);
}

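/*
 * KVM_SEV_INIT: allocate an ASID and initialize the platform before any
 * vCPUs are created.  The ASID is bound to a firmware handle only later,
 * by sev_bind_asid() during LAUNCH_START.
 */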
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new();
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

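/* ACTIVATE the firmware handle on this guest's ASID. */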
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);
	kfree(data);

	return ret;
}

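/*
 * Issue a SEV firmware command through the file descriptor userspace
 * opened on the SEV device, letting the PSP driver validate the file
 * before executing the command.
 */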
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

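/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context.
 * Optional DH certificate and session blobs are copied in from userspace,
 * the firmware returns a handle, and the handle is bound to the guest's
 * ASID before being reported back to userspace.
 */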
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret) {
		sev_decommission(start->handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}

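/*
 * Pin a userspace range and return the page array.  Pinned pages are
 * charged against RLIMIT_MEMLOCK via sev->pages_locked; callers must hold
 * kvm->lock and release the pages with sev_unpin_memory().
 */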
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

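/*
 * Flush the given pages out of the CPU caches.  Needed on CPUs without
 * SME_COHERENT before the firmware reads or encrypts guest pages, since
 * the caches may still hold the unencrypted data.
 */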
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

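/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt the given userspace range in place.
 * The pages are pinned, flushed, and then fed to the firmware one
 * physically contiguous run at a time.
 */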
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages)) {
		ret = PTR_ERR(inpages);
		goto e_free;
	}

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}

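/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement.  If userspace
 * passes len == 0, only the required blob length is reported back;
 * otherwise the measurement is copied out through params.uaddr.
 */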
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure *data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
	}

cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

	/*
	 * If we only queried the blob length, the firmware has responded
	 * with the expected length in data->len.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}


static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

	kfree(data);
	return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
	if (ret)
		goto e_free;

	params.policy = data->policy;
	params.state = data->state;
	params.handle = data->handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;
e_free:
	kfree(data);
	return ret;
}


static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	data->dst_addr = dst;
	data->src_addr = src;
	data->len = size;

	ret = sev_issue_cmd(kvm,
			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			    data, error);
	kfree(data);
	return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked for; the caller should
	 * ensure that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

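/*
 * Decrypt guest memory into a user buffer.  If the addresses or size are
 * not 16-byte aligned, decrypt through a bounce page and copy only the
 * requested window out to userspace.
 */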
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

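/*
 * Encrypt a user buffer into guest memory.  An unaligned source is staged
 * through a bounce page; an unaligned destination or length requires a
 * read-modify-write through a second, decrypted bounce page.
 */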
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is kernel buffer then use memcpy() otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

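/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: copy data between guest memory and a
 * userspace buffer, transforming it page by page.  Source and destination
 * pages are pinned and flushed around each DBG firmware command.
 */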
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) unsigned long vaddr, vaddr_end, next_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) unsigned long dst_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct page **src_p, **dst_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct kvm_sev_dbg debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) unsigned long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (!sev_guest(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!debug.dst_uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) vaddr = debug.src_uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) size = debug.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) vaddr_end = vaddr + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dst_vaddr = debug.dst_uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (; vaddr < vaddr_end; vaddr = next_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int len, s_off, d_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* lock userspace source and destination page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (IS_ERR(src_p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return PTR_ERR(src_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (IS_ERR(dst_p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) sev_unpin_memory(kvm, src_p, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return PTR_ERR(dst_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * the pages; flush the destination too so that future accesses do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * see stale data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) sev_clflush_pages(src_p, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) sev_clflush_pages(dst_p, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * Since user buffer may not be page aligned, calculate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * offset within the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) s_off = vaddr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) d_off = dst_vaddr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) len = min_t(size_t, (PAGE_SIZE - s_off), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (dec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ret = __sev_dbg_decrypt_user(kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) __sme_page_pa(src_p[0]) + s_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dst_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) __sme_page_pa(dst_p[0]) + d_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) len, &argp->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = __sev_dbg_encrypt_user(kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) __sme_page_pa(src_p[0]) + s_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) __sme_page_pa(dst_p[0]) + d_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dst_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) len, &argp->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) sev_unpin_memory(kvm, src_p, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sev_unpin_memory(kvm, dst_p, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) next_vaddr = vaddr + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dst_vaddr = dst_vaddr + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) size -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
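/*
 * Handle KVM_SEV_LAUNCH_SECRET: inject a secret (e.g. a disk decryption
 * key) into the guest during launch. The wrapped secret blob and its
 * packet header are copied in from userspace, and the firmware unwraps
 * the blob and writes it into the pinned guest pages encrypted with the
 * guest's key.
 */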
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct sev_data_launch_secret *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct kvm_sev_launch_secret params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) void *blob, *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) unsigned long n, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int ret, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!sev_guest(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (IS_ERR(pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return PTR_ERR(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * place; the cache may contain the data that was written unencrypted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) sev_clflush_pages(pages, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * The secret must be copied into a physically contiguous region, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * verify that the userspace pages are contiguous before issuing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (get_num_contig_pages(0, pages, n) != n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) goto e_unpin_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto e_unpin_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) offset = params.guest_uaddr & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) data->guest_address = __sme_page_pa(pages[0]) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) data->guest_len = params.guest_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (IS_ERR(blob)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = PTR_ERR(blob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) data->trans_address = __psp_pa(blob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) data->trans_len = params.trans_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (IS_ERR(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = PTR_ERR(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto e_free_blob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) data->hdr_address = __psp_pa(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) data->hdr_len = params.hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) data->handle = sev->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) kfree(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) e_free_blob:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) kfree(blob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) e_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) e_unpin_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* The memory content was updated; mark the pages dirty and accessed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) set_page_dirty_lock(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) mark_page_accessed(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) sev_unpin_memory(kvm, pages, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
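/*
 * Top-level dispatcher for the memory-encryption ioctl: copy the
 * command header from userspace, run the matching KVM_SEV_* handler
 * under kvm->lock, then copy the header back so userspace sees the
 * firmware error code and any returned lengths.
 */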
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct kvm_sev_cmd sev_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!svm_sev_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) switch (sev_cmd.id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case KVM_SEV_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) r = sev_guest_init(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case KVM_SEV_LAUNCH_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) r = sev_launch_start(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) case KVM_SEV_LAUNCH_UPDATE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) r = sev_launch_update_data(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) case KVM_SEV_LAUNCH_MEASURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) r = sev_launch_measure(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case KVM_SEV_LAUNCH_FINISH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) r = sev_launch_finish(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) case KVM_SEV_GUEST_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) r = sev_guest_status(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) case KVM_SEV_DBG_DECRYPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) r = sev_dbg_crypt(kvm, &sev_cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) case KVM_SEV_DBG_ENCRYPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) r = sev_dbg_crypt(kvm, &sev_cmd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) case KVM_SEV_LAUNCH_SECRET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) r = sev_launch_secret(kvm, &sev_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
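/*
 * Pin a userspace address range and track it on the per-VM regions
 * list, keeping the memory resident for as long as the guest may
 * access it with the C-bit set.
 */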
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int svm_register_enc_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct kvm_enc_region *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct enc_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!sev_guest(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (IS_ERR(region->pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ret = PTR_ERR(region->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) region->uaddr = range->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) region->size = range->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) list_add_tail(&region->list, &sev->regions_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * The guest may change the memory encryption attribute from C=0 -> C=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * or vice versa for this memory range. Make sure caches are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * flushed so that guest data gets written into memory with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * correct C-bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) sev_clflush_pages(region->pages, region->npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) e_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) kfree(region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
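/*
 * Look up a registered region by exact address/size match.
 * The caller must hold kvm->lock.
 */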
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static struct enc_region *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct list_head *head = &sev->regions_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct enc_region *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) list_for_each_entry(i, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (i->uaddr == range->addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) i->size == range->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
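/*
 * Unpin a region's pages and free its bookkeeping. The caller must
 * hold kvm->lock and must have flushed guest-tagged cache lines.
 */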
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void __unregister_enc_region_locked(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct enc_region *region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sev_unpin_memory(kvm, region->pages, region->npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) list_del(&region->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) kfree(region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
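/*
 * Undo svm_register_enc_region() for one range: flush caches with
 * WBINVD, then unpin the pages and drop the region from the list.
 */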
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int svm_unregister_enc_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct kvm_enc_region *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct enc_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (!sev_guest(kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) region = find_enc_region(kvm, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Ensure that all guest tagged cache entries are flushed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * releasing the pages back to the system for use. CLFLUSH will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * not do this, so issue a WBINVD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) wbinvd_on_all_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) __unregister_enc_region_locked(kvm, region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
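/*
 * VM teardown: flush guest-tagged cache lines, unpin any regions that
 * userspace never unregistered, then release the firmware handle and
 * return the ASID to the allocator.
 */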
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) void sev_vm_destroy(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct list_head *head = &sev->regions_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct list_head *pos, *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!sev_guest(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Ensure that all guest tagged cache entries are flushed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * releasing the pages back to the system for use. CLFLUSH will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * not do this, so issue a WBINVD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) wbinvd_on_all_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * If userspace was terminated before unregistering the memory regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * unpin all of the registered memory here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) list_for_each_safe(pos, q, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) __unregister_enc_region_locked(kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) list_entry(pos, struct enc_region, list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) sev_unbind_asid(kvm, sev->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) sev_asid_free(sev->asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
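/*
 * Probe SEV at init: CPUID 0x8000001F reports the number of supported
 * ASIDs in ECX and the minimum SEV ASID in EDX; size the allocation
 * and reclaim bitmaps from those values.
 */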
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int __init sev_hardware_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* Maximum number of encrypted guests supported simultaneously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) max_sev_asid = cpuid_ecx(0x8000001F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!svm_sev_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* Minimum ASID value that should be used for SEV guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) min_sev_asid = cpuid_edx(0x8000001F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* Initialize SEV ASID bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (!sev_asid_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (!sev_reclaim_asid_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) pr_info("SEV supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
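/*
 * Free the ASID bitmaps on module teardown and issue a final DF_FLUSH
 * so no guest-tagged data lingers after the last ASID is retired.
 */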
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) void sev_hardware_teardown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!svm_sev_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) bitmap_free(sev_asid_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) bitmap_free(sev_reclaim_asid_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sev_flush_asids();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
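/*
 * Called before every VMRUN: program the guest's ASID into the VMCB
 * and request a TLB flush for that ASID whenever a different VMCB ran
 * with it on this CPU or this VMCB last ran on another CPU.
 */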
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) void pre_sev_run(struct vcpu_svm *svm, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int asid = sev_get_asid(svm->vcpu.kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* Assign the ASID allocated to this SEV guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) svm->vmcb->control.asid = asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * Flush guest TLB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * 1) when a different VMCB for the same ASID is to be run on the same host CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * 2) or this VMCB was executed on a different host CPU in previous VMRUNs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (sd->sev_vmcbs[asid] == svm->vmcb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) svm->vcpu.arch.last_vmentry_cpu == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) sd->sev_vmcbs[asid] = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }