author     Linus Torvalds <torvalds@linux-foundation.org>   2017-03-04 22:36:19 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-03-04 22:36:19 +0300
commit     2d62e0768d3c28536d4cfe4c40ba1e5e8e442a93 (patch)
tree       333f8cbcdb3b650813d758711a9e4ceee7b6fbce /arch
parent     be834aafdf5f8a37c191e697ac8ee6d53ab5020c (diff)
parent     16ce771b93ab569490fd27415694132a7ade0d79 (diff)
Merge tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Radim Krčmář:
 "Second batch of KVM changes for the 4.11 merge window:

  PPC:
   - correct assumption about ASDR on POWER9
   - fix MMIO emulation on POWER9

  x86:
   - add a simple test for ioperm
   - cleanup TSS (going through KVM tree as the whole undertaking was
     caused by VMX's use of TSS)
   - fix nVMX interrupt delivery
   - fix some performance counters in the guest

  ... and two cleanup patches"

* tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: Fix pending events injection
  x86/kvm/vmx: remove unused variable in segment_base()
  selftests/x86: Add a basic selftest for ioperm
  x86/asm: Tidy up TSS limit code
  kvm: convert kvm.users_count from atomic_t to refcount_t
  KVM: x86: never specify a sample period for virtualized in_tx_cp counters
  KVM: PPC: Book3S HV: Don't use ASDR for real-mode HPT faults on POWER9
  KVM: PPC: Book3S HV: Fix software walk of guest process page tables
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/include/asm/book3s/64/mmu.h     3
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_radix.c       5
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rmhandlers.S      8
-rw-r--r--   arch/x86/include/asm/desc.h                 18
-rw-r--r--   arch/x86/kernel/ioport.c                     8
-rw-r--r--   arch/x86/kernel/process.c                    6
-rw-r--r--   arch/x86/kvm/pmu.c                          13
-rw-r--r--   arch/x86/kvm/vmx.c                           9
8 files changed, 46 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 1145dc8e726d..805d4105e9bb 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -46,7 +46,7 @@ extern struct patb_entry *partition_tb;
/* Bits in patb0 field */
#define PATB_HR (1UL << 63)
-#define RPDB_MASK 0x0ffffffffffff00fUL
+#define RPDB_MASK 0x0fffffffffffff00UL
#define RPDB_SHIFT (1UL << 8)
#define RTS1_SHIFT 61 /* top 2 bits of radix tree size */
#define RTS1_MASK (3UL << RTS1_SHIFT)
@@ -57,6 +57,7 @@ extern struct patb_entry *partition_tb;
/* Bits in patb1 field */
#define PATB_GR (1UL << 63) /* guest uses radix; must match HR */
#define PRTS_MASK 0x1f /* process table size field */
+#define PRTB_MASK 0x0ffffffffffff000UL
/*
* Limit process table to PAGE_SIZE table. This
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 4344651f408c..f6b3e67c5762 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -32,6 +32,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
u32 pid;
int ret, level, ps;
__be64 prte, rpte;
+ unsigned long ptbl;
unsigned long root, pte, index;
unsigned long rts, bits, offset;
unsigned long gpa;
@@ -53,8 +54,8 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return -EINVAL;
/* Read partition table to find root of tree for effective PID */
- ret = kvm_read_guest(kvm, kvm->arch.process_table + pid * 16,
- &prte, sizeof(prte));
+ ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
+ ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
if (ret)
return ret;
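
The hunk above masks kvm->arch.process_table with PRTB_MASK before indexing,
because the low bits of that register hold the process-table size field
(PRTS), not address bits. A minimal user-space sketch of the same arithmetic,
using the PRTB_MASK value from the diff; the example process_table value and
pid are made up for illustration:

/* Sketch only: extract the process-table base, then index 16-byte entries. */
#include <stdio.h>

#define PRTB_MASK 0x0ffffffffffff000UL   /* base-address bits, per the diff */

int main(void)
{
        /* low bits encode the table size (PRTS), so they must be masked off */
        unsigned long process_table = 0x000000004567100cUL;  /* hypothetical */
        unsigned int pid = 42;

        /* each process-table entry is 16 bytes: entry = base + pid * 16 */
        unsigned long ptbl = (process_table & PRTB_MASK) + (unsigned long)pid * 16;

        printf("entry for pid %u at 0x%lx\n", pid, ptbl);
        return 0;
}
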
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 47414a6fe2dd..7c6477d1840a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1787,12 +1787,12 @@ kvmppc_hdsi:
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
+ andi. r0, r11, MSR_DR /* data relocation enabled? */
+ beq 3f
BEGIN_FTR_SECTION
mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
b 4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- andi. r0, r11, MSR_DR /* data relocation enabled? */
- beq 3f
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -1879,12 +1879,12 @@ kvmppc_hisi:
bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
+ andi. r0, r11, MSR_IR /* instruction relocation enabled? */
+ beq 3f
BEGIN_FTR_SECTION
mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
b 4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- andi. r0, r11, MSR_IR /* instruction relocation enabled? */
- beq 3f
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_INST_SEGMENT
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index cb8f9149f6c8..1548ca92ad3f 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
static inline void force_reload_TR(void)
{
struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
load_TR_desc();
+ this_cpu_write(__tss_limit_invalid, false);
}
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
{
DEBUG_LOCKS_WARN_ON(preemptible());
- if (unlikely(this_cpu_read(need_tr_refresh))) {
+ if (unlikely(this_cpu_read(__tss_limit_invalid)))
force_reload_TR();
- this_cpu_write(need_tr_refresh, false);
- }
}
/*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
force_reload_TR();
else
- this_cpu_write(need_tr_refresh, true);
+ this_cpu_write(__tss_limit_invalid, true);
}
static inline void native_load_gdt(const struct desc_ptr *dtr)
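
The desc.h change renames the per-CPU flag to __tss_limit_invalid and clears
it inside force_reload_TR(), so refresh_tss_limit() only pays for a TR reload
when the limit really is stale. A rough user-space sketch of that lazy
invalidate/refresh pattern, with the per-CPU flag and the expensive reload
simulated; apart from the role of __tss_limit_invalid, the names here are
illustrative, not the kernel's:

/* Sketch only: lazy "mark stale now, reload only when needed" pattern. */
#include <stdbool.h>
#include <stdio.h>

static bool tss_limit_invalid;          /* stands in for the per-CPU flag */

static void force_reload_tr(void)       /* stands in for the costly LTR reload */
{
        printf("reloading TR (expensive)\n");
        tss_limit_invalid = false;      /* the reload itself clears the flag */
}

static void invalidate_tss_limit(bool task_uses_io_bitmap)
{
        if (task_uses_io_bitmap)
                force_reload_tr();      /* must be correct immediately */
        else
                tss_limit_invalid = true;   /* defer the reload */
}

static void refresh_tss_limit(void)
{
        if (tss_limit_invalid)          /* reload only if marked stale */
                force_reload_tr();
}

int main(void)
{
        invalidate_tss_limit(false);    /* lazy path: just mark stale */
        refresh_tss_limit();            /* first user needing it reloads */
        refresh_tss_limit();            /* already valid: no reload */
        return 0;
}
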
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index ca49bab3e467..9c3cf0944bce 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -48,8 +48,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
t->io_bitmap_ptr = bitmap;
set_thread_flag(TIF_IO_BITMAP);
+ /*
+ * Now that we have an IO bitmap, we need our TSS limit to be
+ * correct. It's fine if we are preempted after doing this:
+ * with TIF_IO_BITMAP set, context switches will keep our TSS
+ * limit correct.
+ */
preempt_disable();
- refresh_TR();
+ refresh_tss_limit();
preempt_enable();
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 56b059486c3b..f67591561711 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -69,8 +69,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
* this gets called so that we can store lazy state into memory and copy the
@@ -222,7 +222,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
* Make sure that the TSS limit is correct for the CPU
* to notice the IO bitmap.
*/
- refresh_TR();
+ refresh_tss_limit();
} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
/*
* Clear any possible leftover bits:
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 06ce377dcbc9..026db42a86c3 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -113,12 +113,19 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
.config = config,
};
+ attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
if (in_tx)
attr.config |= HSW_IN_TX;
- if (in_tx_cp)
+ if (in_tx_cp) {
+ /*
+ * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
+ * period. Just clear the sample period so at least
+ * allocating the counter doesn't fail.
+ */
+ attr.sample_period = 0;
attr.config |= HSW_IN_TX_CHECKPOINTED;
-
- attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+ }
event = perf_event_create_kernel_counter(&attr, -1, current,
intr ? kvm_perf_overflow_intr :
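
For reference, attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc) is the
number of increments left before a counter of that bit width wraps, which is
why in_tx_cp counters must fall back to a zero period instead. A small
stand-alone sketch of the arithmetic, assuming a hypothetical 48-bit counter:

/* Sketch only: (-counter) & bitmask == increments remaining until overflow. */
#include <stdio.h>

int main(void)
{
        unsigned long long bitmask = (1ULL << 48) - 1;   /* 48-bit counter width */
        unsigned long long counter = bitmask - 99;       /* 100 ticks from overflow */

        unsigned long long period = (0ULL - counter) & bitmask;

        printf("sample period = %llu\n", period);        /* prints 100 */
        return 0;
}
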
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef4ba71dbb66..283aa8601833 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2053,7 +2053,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
static unsigned long segment_base(u16 selector)
{
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
- struct desc_struct *d;
struct desc_struct *table;
unsigned long v;
@@ -10642,6 +10641,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (vcpu->arch.exception.pending ||
+ vcpu->arch.nmi_injected ||
+ vcpu->arch.interrupt.pending)
+ return -EBUSY;
+
if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vmx->nested.preemption_timer_expired) {
if (vmx->nested.nested_run_pending)
@@ -10651,8 +10655,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
}
if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
- if (vmx->nested.nested_run_pending ||
- vcpu->arch.interrupt.pending)
+ if (vmx->nested.nested_run_pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
NMI_VECTOR | INTR_TYPE_NMI_INTR |
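
The new early-out in vmx_check_nested_events() reports -EBUSY whenever an
exception, NMI, or interrupt injection is already queued, instead of checking
interrupt.pending only on the NMI path. A stripped-down sketch of that check,
with a stand-in struct for the relevant vcpu->arch fields:

/* Sketch only: bail out early if an event injection is already in flight. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu_events {
        bool exception_pending;
        bool nmi_injected;
        bool interrupt_pending;
};

static int check_nested_events(const struct vcpu_events *e)
{
        /* an injection is queued: don't start a nested vmexit that races it */
        if (e->exception_pending || e->nmi_injected || e->interrupt_pending)
                return -EBUSY;

        /* ... preemption timer / NMI / interrupt exit checks follow ... */
        return 0;
}

int main(void)
{
        struct vcpu_events e = { .nmi_injected = true };
        printf("check_nested_events: %d\n", check_nested_events(&e));  /* -EBUSY */
        return 0;
}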