author     Michael Ellerman <mpe@ellerman.id.au>  2022-05-19 16:10:42 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>  2022-05-19 16:10:42 +0300
commit     b104e41cda1ef9c5e851a7de3f30b53535e7d528 (patch)
tree       7bab688d125e67e42d387b97c45522c76155bbe2 /arch/powerpc/kvm/book3s_hv_p9_entry.c
parent     a5fc286f69fc9590c22995fe05dca461fd6295b1 (diff)
parent     ad55bae7dc364417434b69dd6c30104f20d0f84d (diff)
download   linux-b104e41cda1ef9c5e851a7de3f30b53535e7d528.tar.xz
Merge branch 'topic/ppc-kvm' into next
Merge our KVM topic branch.
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_p9_entry.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_p9_entry.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index ac38c1cad378..112a09b33328 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
 {
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	u32 lpid;
+	u32 pid;
 
 	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * Prior memory accesses to host PID Q3 must be completed before we
@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 	/*
 	 * isync not required here because we are HRFID'ing to guest before
 	 * any guest context access, which is context synchronising.
@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 {
 	u32 lpid;
+	u32 pid;
 	int i;
 
 	lpid = kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * See switch_mmu_to_guest_radix. ptesync should not be required here
@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 
 	for (i = 0; i < vcpu->arch.slb_max; i++)
 		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 
 static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
+	u32 lpid = kvm->arch.host_lpid;
+	u64 lpcr = kvm->arch.host_lpcr;
+
 	/*
 	 * The guest has exited, so guest MMU context is no longer being
 	 * non-speculatively accessed, but a hwsync is needed before the
@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_PID, pid);
-	mtspr(SPRN_LPID, kvm->arch.host_lpid);
-	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	mtspr(SPRN_LPID, lpid);
+	mtspr(SPRN_LPCR, lpcr);
 	/*
 	 * isync is not required after the switch, because mtmsrd with L=0
 	 * is performed after this switch, which is context synchronising.
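
For reference, switch_mmu_to_host() reads roughly as follows once the hunks above are applied. This is a sketch assembled only from the diff context shown here: the closing brace and anything elided by the hunk boundaries are assumed, and the rationale noted in the first comment (keeping the loads of the host SPR values out of the hwsync/isync-to-mtspr window) is inferred from the shape of the change rather than stated in the merge itself.

static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
	/*
	 * Host LPID/LPCR are loaded up front, so no loads sit between the
	 * synchronising hwsync/isync and the mtspr updates below (assumed
	 * rationale, inferred from the diff rather than stated in it).
	 */
	u32 lpid = kvm->arch.host_lpid;
	u64 lpcr = kvm->arch.host_lpcr;

	/*
	 * The guest has exited, so guest MMU context is no longer being
	 * non-speculatively accessed, but a hwsync is needed before the
	 * SPR switch below (comment truncated by the hunk boundary).
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_PID, pid);
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	/*
	 * isync is not required after the switch, because mtmsrd with L=0
	 * is performed after this switch, which is context synchronising.
	 */

	/* ... remainder of the function lies outside the hunks shown above. */
}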