author     Hollis Blanchard <hollisb@us.ibm.com>  2008-11-10 23:57:36 +0300
committer  Avi Kivity <avi@redhat.com>            2008-12-31 17:52:26 +0300
commit     fe4e771d5c37f0949047faf95d16a512b21406bf (patch)
tree       518b4ff17a8e1e62b747ce1912c08b62883d2855 /arch/powerpc/kvm
parent     df9b856c454e331bc394c80903fcdea19cae2a33 (diff)
download   linux-fe4e771d5c37f0949047faf95d16a512b21406bf.tar.xz
KVM: ppc: fix userspace mapping invalidation on context switch
We used to defer invalidating userspace TLB entries until jumping out of the kernel. This was causing MMU weirdness, most easily triggered by using a pipe in the guest, e.g. "dmesg | tail". I believe the problem was that after the guest kernel changed the PID (as part of a context switch), the old process's mappings were still present, so copy_to_user() on the "return to new process" path ended up using stale mappings.

Testing with large pages (64K) exposed the problem, probably because with 4K pages, pressure on the TLB faulted all of process A's mappings out before the guest kernel could insert any for process B.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
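To make the fix concrete, here is a minimal, self-contained userspace sketch of the eager invalidation this patch adopts. The toy_* names and the simplified TLB entry are illustrative only; the real code operates on vcpu_44x->shadow_tlb, as the diff below shows.

#include <stdio.h>

#define TOY_SHADOW_TLB_SIZE 64

/* Toy stand-in for a shadow TLB entry: TID 0 marks guest userspace
 * mappings, TID 1 marks guest kernel mappings. */
struct toy_tlbe {
	unsigned int tid;
	unsigned int valid;
};

static struct toy_tlbe shadow_tlb[TOY_SHADOW_TLB_SIZE];
static unsigned int guest_pid;

/* Called as soon as the guest writes a new PID (i.e. context-switches),
 * rather than deferring until the next exit from the kernel: every
 * shadow entry for guest userspace (TID == 0) is dropped immediately,
 * so the new process cannot hit the old process's stale translations. */
static void toy_set_pid(unsigned int new_pid)
{
	int i;

	if (guest_pid == new_pid)
		return;
	guest_pid = new_pid;

	for (i = 0; i < TOY_SHADOW_TLB_SIZE; i++)
		if (shadow_tlb[i].tid == 0)
			shadow_tlb[i].valid = 0;
}

int main(void)
{
	shadow_tlb[0] = (struct toy_tlbe){ .tid = 0, .valid = 1 }; /* process A, user */
	shadow_tlb[1] = (struct toy_tlbe){ .tid = 1, .valid = 1 }; /* guest kernel */

	toy_set_pid(2); /* guest switches to process B */

	/* The user mapping is gone; the kernel mapping survives. */
	printf("user=%u kernel=%u\n", shadow_tlb[0].valid, shadow_tlb[1].valid);
	return 0;
}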
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c |  9
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c     | 31
2 files changed, 18 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 9bc50cebf9ec..9ef79c78ede9 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -21,6 +21,7 @@
 #include <asm/dcr.h>
 #include <asm/dcr-regs.h>
 #include <asm/disassemble.h>
+#include <asm/kvm_44x.h>
 #include "booke.h"
 #include "44x_tlb.h"
 
@@ -38,14 +39,6 @@
 #define XOP_ICCCI 966
 #define XOP_TLBWE 978
 
-static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
-{
-	if (vcpu->arch.pid != new_pid) {
-		vcpu->arch.pid = new_pid;
-		vcpu->arch.swap_pid = 1;
-	}
-}
-
 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pc = vcpu->arch.srr0;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 6fadbd696021..ee2461860bcf 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,31 +268,34 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	}
 }
 
-/* Invalidate all mappings on the privilege switch after PID has been changed.
- * The guest always runs with PID=1, so we must clear the entire TLB when
- * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
+	vcpu->arch.shadow_pid = !usermode;
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;
 
-	if (vcpu->arch.swap_pid) {
-		/* XXX Replace loop with fancy data structures. */
-		for (i = 0; i <= tlb_44x_hwater; i++) {
-			struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+	if (unlikely(vcpu->arch.pid == new_pid))
+		return;
+
+	vcpu->arch.pid = new_pid;
+
+	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
+	 * can't access guest kernel mappings (TID=1). When we switch to a new
+	 * guest PID, which will also use host PID=0, we must discard the old guest
+	 * userspace mappings. */
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
 
-			/* Future optimization: clear only userspace mappings. */
+		if (get_tlb_tid(stlbe) == 0) {
 			kvmppc_44x_shadow_release(vcpu, i);
 			stlbe->word0 = 0;
 			kvmppc_tlbe_set_modified(vcpu, i);
-			KVMTRACE_5D(STLB_INVAL, vcpu, i,
-				    stlbe->tid, stlbe->word0, stlbe->word1,
-				    stlbe->word2, handler);
 		}
-		vcpu->arch.swap_pid = 0;
 	}
-
-	vcpu->arch.shadow_pid = !usermode;
 }
 
 static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
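The shadow_pid switch in kvmppc_mmu_priv_switch() above leans on the 440 MMU's match rule (as I read it): a TLB entry hits when its TID equals the current PID, or when its TID is 0, which marks the entry global. With guest user mode mapped to host PID 0 and guest kernel mode to host PID 1, the guest kernel can still reach userspace pages (e.g. for copy_to_user()), while guest userspace can never hit TID=1 kernel entries. A toy model of that rule (toy_tlbe_hits() is a hypothetical name, not kernel code):

#include <assert.h>

/* Models the match rule assumed above: TID == 0 entries are global;
 * all other entries must match the current PID exactly. */
static int toy_tlbe_hits(unsigned int tid, unsigned int pid)
{
	return tid == 0 || tid == pid;
}

int main(void)
{
	assert(toy_tlbe_hits(0, 0));  /* guest user sees user mappings */
	assert(!toy_tlbe_hits(1, 0)); /* guest user cannot see kernel mappings */
	assert(toy_tlbe_hits(0, 1));  /* guest kernel sees user mappings (copy_to_user) */
	assert(toy_tlbe_hits(1, 1));  /* guest kernel sees its own mappings */
	return 0;
}

This is also why stale TID=0 entries are dangerous: they hit in every context, so after a guest PID change, process B could silently use process A's pages, which is exactly the "dmesg | tail" corruption the commit message describes.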