author      Sean Christopherson <sean.j.christopherson@intel.com>   2021-03-05 21:31:18 +0300
committer   Paolo Bonzini <pbonzini@redhat.com>                      2021-03-15 11:44:01 +0300
commit      cdbd4b40e70c63e15554120cf486194fd16cb905 (patch)
tree        b4d47dddcfc82a2ae5d250168293305b44967cb9 /arch/x86
parent      446f7f11553028feee34d5cfcf25a87969283255 (diff)
download    linux-cdbd4b40e70c63e15554120cf486194fd16cb905.tar.xz
KVM: VMX: Invalidate hv_tlb_eptp to denote an EPTP mismatch
Drop the dedicated 'ept_pointers_match' field in favor of stuffing
'hv_tlb_eptp' with INVALID_PAGE to mark it as invalid, i.e. to denote that
there is at least one EPTP mismatch.  Use a local variable to track whether
or not a mismatch is detected so that hv_tlb_eptp can be used to skip
redundant flushes.

No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305183123.3978098-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
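For readers who want the pattern in isolation before reading the diff: the
sketch below is a simplified, standalone illustration of what the patch does
(a sentinel value replacing the tri-state enum, plus a local counter to detect
a mismatch). It is not the kernel code; NR_VCPUS, vcpu_eptp[], cached_eptp,
flush_eptp() and main() are hypothetical stand-ins for kvm_for_each_vcpu(),
the per-vCPU ept_pointer, hv_tlb_eptp and hv_remote_flush_eptp().

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_PAGE  ((uint64_t)-1)
    #define VALID_PAGE(p) ((p) != INVALID_PAGE)

    #define NR_VCPUS 4

    /* Per-vCPU EPTPs; all equal here, so the cache converges after one pass. */
    static uint64_t vcpu_eptp[NR_VCPUS] = { 0x1000, 0x1000, 0x1000, 0x1000 };

    /* Cached common EPTP; INVALID_PAGE doubles as "unknown or mismatched". */
    static uint64_t cached_eptp = INVALID_PAGE;

    /* Stand-in for the hypercall that flushes mappings for one EPTP. */
    static int flush_eptp(uint64_t eptp)
    {
            printf("flush %#llx\n", (unsigned long long)eptp);
            return 0;
    }

    static int flush_all(void)
    {
            int ret = 0, i, nr_unique = 0;

            /* Fast path: all vCPUs previously converged on one EPTP. */
            if (VALID_PAGE(cached_eptp))
                    return flush_eptp(cached_eptp);

            for (i = 0; i < NR_VCPUS; i++) {
                    uint64_t eptp = vcpu_eptp[i];

                    if (!VALID_PAGE(eptp) || eptp == cached_eptp)
                            continue;
                    if (++nr_unique == 1)
                            cached_eptp = eptp;     /* remember the first valid EPTP */
                    ret |= flush_eptp(eptp);
            }

            if (nr_unique > 1)
                    cached_eptp = INVALID_PAGE;     /* mismatch: nothing to cache */
            return ret;
    }

    int main(void)
    {
            flush_all();    /* slow path: walks every vCPU, caches the common EPTP */
            flush_all();    /* fast path: flushes only the cached EPTP */
            return 0;
    }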
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kvm/vmx/vmx.c   35
-rw-r--r--   arch/x86/kvm/vmx/vmx.h    7
2 files changed, 23 insertions, 19 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 67457a73abb9..2dc7e28bb8e5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -500,32 +500,44 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
{
struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
struct kvm_vcpu *vcpu;
- int ret = 0, i;
+ int ret = 0, i, nr_unique_valid_eptps;
u64 tmp_eptp;
spin_lock(&kvm_vmx->ept_pointer_lock);
- if (kvm_vmx->ept_pointers_match != EPT_POINTERS_MATCH) {
- kvm_vmx->ept_pointers_match = EPT_POINTERS_MATCH;
- kvm_vmx->hv_tlb_eptp = INVALID_PAGE;
+ if (!VALID_PAGE(kvm_vmx->hv_tlb_eptp)) {
+ nr_unique_valid_eptps = 0;
+ /*
+ * Flush all valid EPTPs, and see if all vCPUs have converged
+ * on a common EPTP, in which case future flushes can skip the
+ * loop and flush the common EPTP.
+ */
kvm_for_each_vcpu(i, vcpu, kvm) {
tmp_eptp = to_vmx(vcpu)->ept_pointer;
if (!VALID_PAGE(tmp_eptp) ||
tmp_eptp == kvm_vmx->hv_tlb_eptp)
continue;
- if (!VALID_PAGE(kvm_vmx->hv_tlb_eptp))
+ /*
+ * Set the tracked EPTP to the first valid EPTP. Keep
+ * this EPTP for the entirety of the loop even if more
+ * EPTPs are encountered as a low effort optimization
+ * to avoid flushing the same (first) EPTP again.
+ */
+ if (++nr_unique_valid_eptps == 1)
kvm_vmx->hv_tlb_eptp = tmp_eptp;
- else
- kvm_vmx->ept_pointers_match
- = EPT_POINTERS_MISMATCH;
ret |= hv_remote_flush_eptp(tmp_eptp, range);
}
- if (kvm_vmx->ept_pointers_match == EPT_POINTERS_MISMATCH)
+
+ /*
+ * The optimized flush of a single EPTP can't be used if there
+ * are multiple valid EPTPs (obviously).
+ */
+ if (nr_unique_valid_eptps > 1)
kvm_vmx->hv_tlb_eptp = INVALID_PAGE;
- } else if (VALID_PAGE(kvm_vmx->hv_tlb_eptp)) {
+ } else {
ret = hv_remote_flush_eptp(kvm_vmx->hv_tlb_eptp, range);
}
@@ -3105,8 +3117,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
if (kvm_x86_ops.tlb_remote_flush) {
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
to_vmx(vcpu)->ept_pointer = eptp;
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_CHECK;
+ to_kvm_vmx(kvm)->hv_tlb_eptp = INVALID_PAGE;
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
}
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f846cf3a5d25..fb7b2000bd0e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -338,12 +338,6 @@ struct vcpu_vmx {
} shadow_msr_intercept;
};
-enum ept_pointers_status {
- EPT_POINTERS_CHECK = 0,
- EPT_POINTERS_MATCH = 1,
- EPT_POINTERS_MISMATCH = 2
-};
-
struct kvm_vmx {
struct kvm kvm;
@@ -352,7 +346,6 @@ struct kvm_vmx {
gpa_t ept_identity_map_addr;
hpa_t hv_tlb_eptp;
- enum ept_pointers_status ept_pointers_match;
spinlock_t ept_pointer_lock;
};
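With the vmx.h hunk the enum is gone entirely and the only remaining state is
hv_tlb_eptp itself. Continuing the standalone sketch above (same caveats:
illustrative names, not the kernel API), the vmx_load_mmu_pgd() side of the
change reduces to invalidating the cached value whenever any vCPU installs an
EPTP:

    /* Continuation of the sketch above (reuses vcpu_eptp[] and cached_eptp).
     * Mirrors the vmx_load_mmu_pgd() hunk: installing an EPTP on any vCPU
     * invalidates the cached common EPTP so the next flush re-derives it. */
    static void load_eptp(int vcpu, uint64_t eptp)
    {
            vcpu_eptp[vcpu] = eptp;
            cached_eptp = INVALID_PAGE;     /* force the slow path on the next flush */
    }

Locking is omitted in the sketch; in the kernel both the flush and the load
paths run under ept_pointer_lock, as the diff shows.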