path: root/arch/x86/kvm/vmx.c
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c | 45 ++++++++++++++++++++++++++++++---------------
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 579a0b51696a..906a7e84200f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -18,6 +18,7 @@
#include "irq.h"
#include "mmu.h"
+#include "cpuid.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
int save_nmsrs, index;
unsigned long *msr_bitmap;
- vmx_load_host_state(vmx);
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_RDPMC_EXITING |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 1;
/* Otherwise falls through */
default:
- vmx_load_host_state(to_vmx(vcpu));
if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
- vmx_load_host_state(to_vmx(vcpu));
data = msr->data;
break;
}
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
switch (msr_index) {
case MSR_EFER:
- vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data);
break;
#ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
- vmx_load_host_state(vmx);
msr->data = data;
break;
}
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING |
- CPU_BASED_INVLPG_EXITING;
+ CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_RDPMC_EXITING;
if (yield_on_hlt)
min |= CPU_BASED_HLT_EXITING;
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
gfn_t base_gfn;
slots = kvm_memslots(kvm);
- base_gfn = slots->memslots[0].base_gfn +
- kvm->memslots->memslots[0].npages - 3;
+ slot = id_to_memslot(slots, 0);
+ base_gfn = slot->base_gfn + slot->npages - 3;
+
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
- if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
- /* We can get here when nested_run_pending caused
- * vmx_interrupt_allowed() to return false. In this case, do
- * nothing - the interrupt will be injected later.
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+ /*
+ * We get here if vmx_interrupt_allowed() said we can't
+ * inject to L1 now because L2 must run. Ask L2 to exit
+ * right after entry, so we can inject to L1 more promptly.
*/
+ kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
return;
+ }
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
- struct vmcs12 *vmcs12;
- if (to_vmx(vcpu)->nested.nested_run_pending)
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ if (to_vmx(vcpu)->nested.nested_run_pending ||
+ (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_VALID_MASK))
return 0;
nested_vmx_vmexit(vcpu);
- vmcs12 = get_vmcs12(vcpu);
vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
vmcs12->vm_exit_intr_info = 0;
/* fall through to normal code, but now in L1, not L2 */
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+ int err;
+
+ err = kvm_rdpmc(vcpu);
+ kvm_complete_insn_gp(vcpu, err);
+
+ return 1;
+}
+
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
+ [EXIT_REASON_RDPMC] = handle_rdpmc,
[EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
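
The enable_irq_window() hunk above relies on the kvm_make_request()/kvm_check_request() mechanism: each vcpu carries a bitmask of pending requests, set by one path and consumed (test-and-clear) by the entry loop in arch/x86/kvm/x86.c before the next VM entry. The consumer side of KVM_REQ_IMMEDIATE_EXIT is not part of this diff. Below is a minimal userspace sketch of the same set/test-and-clear pattern; the struct, function names, and bit index are illustrative stand-ins, not the kernel's actual definitions.

/*
 * Minimal userspace sketch of the request pattern used by the
 * enable_irq_window() hunk above. Illustrative only: the struct,
 * names, and bit index are stand-ins, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_IMMEDIATE_EXIT 0UL /* stand-in for KVM_REQ_IMMEDIATE_EXIT */

struct vcpu {
	unsigned long requests; /* one bit per pending request */
};

/* Analogue of kvm_make_request(): post a request to the vcpu. */
static void make_request(unsigned long req, struct vcpu *vcpu)
{
	vcpu->requests |= 1UL << req;
}

/* Analogue of kvm_check_request(): consume (test-and-clear) a request. */
static bool check_request(unsigned long req, struct vcpu *vcpu)
{
	if (vcpu->requests & (1UL << req)) {
		vcpu->requests &= ~(1UL << req);
		return true;
	}
	return false;
}

int main(void)
{
	struct vcpu vcpu = { .requests = 0 };

	/* enable_irq_window() analogue: L2 must run first, so ask for
	 * an exit right after the next VM entry. */
	make_request(REQ_IMMEDIATE_EXIT, &vcpu);

	/* vcpu_enter_guest() analogue: consume the request before entry
	 * and arrange for the guest to exit immediately. */
	if (check_request(REQ_IMMEDIATE_EXIT, &vcpu))
		printf("immediate exit requested before VM entry\n");

	return 0;
}

In the kernel the same mask (vcpu->requests) is manipulated with atomic bitops, so a request posted from another context survives until the entry loop consumes it before resuming the guest.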