Diffstat (limited to 'arch/x86/kvm/svm/sev.c')
-rw-r--r--  arch/x86/kvm/svm/sev.c | 146
1 file changed, 106 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9858d5ae9ddd..874ea309279f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -22,6 +22,7 @@
#include "x86.h"
#include "svm.h"
+#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"
@@ -342,6 +343,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long first, last;
int ret;
+ lockdep_assert_held(&kvm->lock);
+
if (ulen == 0 || uaddr + ulen < uaddr)
return ERR_PTR(-EINVAL);
@@ -1039,6 +1042,74 @@ e_unpin_memory:
return ret;
}
+static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ void __user *report = (void __user *)(uintptr_t)argp->data;
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_attestation_report *data;
+ struct kvm_sev_attestation_report params;
+ void __user *p;
+ void *blob = NULL;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, report, sizeof(params)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+ if (!data)
+ return -ENOMEM;
+
+ /* User wants to query the blob length */
+ if (!params.len)
+ goto cmd;
+
+ p = (void __user *)(uintptr_t)params.uaddr;
+ if (p) {
+ if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EINVAL;
+ goto e_free;
+ }
+
+ ret = -ENOMEM;
+ blob = kmalloc(params.len, GFP_KERNEL);
+ if (!blob)
+ goto e_free;
+
+ data->address = __psp_pa(blob);
+ data->len = params.len;
+ memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce));
+ }
+cmd:
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error);
+ /*
+ * If the caller only queried the report length, the firmware has
+ * filled in the required size in data->len; skip the blob copy.
+ */
+ if (!params.len)
+ goto done;
+
+ if (ret)
+ goto e_free_blob;
+
+ if (blob) {
+ if (copy_to_user(p, blob, params.len))
+ ret = -EFAULT;
+ }
+
+done:
+ params.len = data->len;
+ if (copy_to_user(report, &params, sizeof(params)))
+ ret = -EFAULT;
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
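For reference, a minimal userspace sketch of driving the new command through KVM_MEMORY_ENCRYPT_OP. The helper name and fd plumbing are assumptions; the uapi structs and the ioctl come from <linux/kvm.h>, with the field layout matching the kernel code above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative userspace sketch (not kernel code). A first call with
 * *len == 0 queries the required buffer size: the firmware is expected
 * to fail that call while still reporting the size back in report.len.
 */
static int get_attestation_report(int vm_fd, int sev_fd,
				  const uint8_t mnonce[16],
				  void *buf, uint32_t *len)
{
	struct kvm_sev_attestation_report report = {};
	struct kvm_sev_cmd cmd = {};
	int ret;

	memcpy(report.mnonce, mnonce, sizeof(report.mnonce));
	report.uaddr = (uint64_t)(uintptr_t)buf;
	report.len = *len;

	cmd.id = KVM_SEV_GET_ATTESTATION_REPORT;
	cmd.data = (uint64_t)(uintptr_t)&report;
	cmd.sev_fd = sev_fd;

	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

	*len = report.len;	/* updated by the kernel on both paths */
	return ret;
}

The two-call pattern mirrors the !params.len fast path above: query the length first, then call again with a buffer of that size.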
@@ -1089,6 +1160,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
case KVM_SEV_LAUNCH_SECRET:
r = sev_launch_secret(kvm, &sev_cmd);
break;
+ case KVM_SEV_GET_ATTESTATION_REPORT:
+ r = sev_get_attestation_report(kvm, &sev_cmd);
+ break;
default:
r = -EINVAL;
goto out;
@@ -1119,12 +1193,20 @@ int svm_register_enc_region(struct kvm *kvm,
if (!region)
return -ENOMEM;
+ mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
+ mutex_unlock(&kvm->lock);
goto e_free;
}
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
or vice versa for this memory range. Let's make sure caches are
@@ -1133,13 +1215,6 @@ int svm_register_enc_region(struct kvm *kvm,
*/
sev_clflush_pages(region->pages, region->npages);
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
return ret;
e_free:
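For context, the userspace counterpart of this path is the KVM_MEMORY_ENCRYPT_REG_REGION ioctl. A hedged sketch, assuming vm_fd, mem and mem_size come from the VMM:

#include <err.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative sketch: register a host virtual range so its pages stay
 * pinned while the guest may access them encrypted. With the locking
 * change above, a racing register/unregister can no longer observe a
 * region that is pinned but not yet on sev->regions_list.
 */
static void register_enc_region(int vm_fd, void *mem, uint64_t mem_size)
{
	struct kvm_enc_region region = {
		.addr = (uint64_t)(uintptr_t)mem,
		.size = mem_size,
	};

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region) < 0)
		err(1, "KVM_MEMORY_ENCRYPT_REG_REGION");
}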
@@ -1415,16 +1490,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
* to be returned:
* GPRs RAX, RBX, RCX, RDX
*
- * Copy their values to the GHCB if they are dirty.
+ * Copy their values, even if they may not have been written during the
+ * VM-Exit. It's the guest's responsibility to not consume random data.
*/
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
- ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
- ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
- ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
- ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+ ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+ ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+ ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+ ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
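The unconditional copies are safe for the guest because each ghcb_set_*() accessor (macro-generated in arch/x86/include/asm/svm.h) also marks the field valid in the GHCB bitmap, so the guest can tell which fields the hypervisor actually wrote. An approximate sketch of the expanded form, not the literal macro output:

/*
 * Approximate shape of a generated GHCB accessor: store the value and
 * flag the field as valid in the GHCB's valid_bitmap.
 */
static inline void ghcb_set_rax(struct ghcb *ghcb, u64 value)
{
	__set_bit(GHCB_BITMAP_IDX(rax),
		  (unsigned long *)&ghcb->save.valid_bitmap);
	ghcb->save.rax = value;
}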
@@ -1563,6 +1635,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
goto vmgexit_err;
break;
case SVM_VMGEXIT_NMI_COMPLETE:
+ case SVM_VMGEXIT_AP_HLT_LOOP:
case SVM_VMGEXIT_AP_JUMP_TABLE:
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
@@ -1888,6 +1961,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
break;
+ case SVM_VMGEXIT_AP_HLT_LOOP:
+ ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+ break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
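kvm_emulate_ap_reset_hold() lands in common x86 code elsewhere in this series; roughly, it skips the VMGEXIT instruction and parks the vCPU in a dedicated mp_state, analogous to HLT emulation. A sketch of its assumed shape, not part of this diff:

/* Rough sketch of the companion helper added to arch/x86/kvm/x86.c. */
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
			       KVM_EXIT_AP_RESET_HOLD) && ret;
}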
@@ -1990,29 +2066,17 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
sev_enc_bit));
}
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
{
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
struct vmcb_save_area *hostsa;
- unsigned int i;
/*
* For an SEV-ES guest, hardware restores the host state on VMEXIT, one
* step of which is a VMLOAD. Since hardware does not perform a VMSAVE
* on VMRUN, the host save area must be updated here.
*/
- asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
-
- /*
- * Certain MSRs are restored on VMEXIT, only save ones that aren't
- * restored.
- */
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
- if (host_save_user_msrs[i].sev_es_restored)
- continue;
-
- rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
- }
+ vmsave(__sme_page_pa(sd->save_area));
/* XCR0 is restored on VMEXIT, save the current host value */
hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
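The open-coded VMSAVE asm above is replaced by the vmsave() helper from the newly included svm_ops.h, presumably a thin wrapper along these lines (a sketch, not the actual header, which also wires up exception handling for the instruction):

/*
 * Hedged sketch of vmsave(): VMSAVE stores a subset of processor state
 * (FS/GS/TR/LDTR, SYSCALL/SYSENTER MSRs, etc.) into the save area whose
 * physical address is passed in RAX.
 */
static __always_inline void vmsave(unsigned long pa)
{
	asm volatile("vmsave %%rax" : : "a" (pa) : "memory");
}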
@@ -2025,18 +2089,20 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
hostsa->xss = host_xss;
}
-void sev_es_vcpu_put(struct vcpu_svm *svm)
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
- unsigned int i;
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /* First SIPI: Use the values as initially set by the VMM */
+ if (!svm->received_first_sipi) {
+ svm->received_first_sipi = true;
+ return;
+ }
/*
- * Certain MSRs are restored on VMEXIT and were saved with vmsave in
- * sev_es_vcpu_load() above. Only restore ones that weren't.
+ * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+ * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ * non-zero value.
*/
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
- if (host_save_user_msrs[i].sev_es_restored)
- continue;
-
- wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
- }
+ ghcb_set_sw_exit_info_2(svm->ghcb, 1);
}
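Taken together, the AP_HLT_LOOP exit handling and sev_vcpu_deliver_sipi_vector() implement the GHCB AP reset hold protocol. A simplified sketch of the guest side, modeled on the guest kernel's sev-es helpers and not part of this patch:

/*
 * Simplified guest-side sketch: an AP parks itself via VMGEXIT and
 * spins until the hypervisor signals a wakeup by setting
 * sw_exit_info_2 non-zero, exactly what sev_vcpu_deliver_sipi_vector()
 * above does on the second and later SIPIs.
 */
static void ap_hlt_loop(struct ghcb *ghcb)
{
	while (true) {
		vc_ghcb_invalidate(ghcb);	/* clear the valid bitmap */
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		/* GHCB GPA is assumed to already be in the GHCB MSR. */
		VMGEXIT();

		/* Woken by a SIPI? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}
	/* Continue to the AP jump table to pick up the new CS:RIP. */
}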