path: root/arch/powerpc/kvm/book3s_hv_p9_entry.c
author     Nicholas Piggin <npiggin@gmail.com>      2021-11-23 12:52:02 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>    2021-11-24 13:08:59 +0300
commit     cb2553a093093ae46cfaee31321bcedcd0312c5d (patch)
tree       9dd0e5b1fff974bc71773e5786e71454d340abe3 /arch/powerpc/kvm/book3s_hv_p9_entry.c
parent     6547af3eba88e4806e853fee7547031b2cc6a560 (diff)
download   linux-cb2553a093093ae46cfaee31321bcedcd0312c5d.tar.xz
KVM: PPC: Book3S HV P9: Optimise timebase reads
Reduce the number of mftb instructions executed by passing the current timebase around entry and exit code rather than reading it multiple times.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-25-npiggin@gmail.com
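The shape of the change, as a rough sketch (illustrative only; the caller name and body below are not the actual book3s_hv.c code): the timebase is read once by the caller, and a pointer to that value is threaded through kvmhv_vcpu_entry_p9() so entry and exit code update the cached value instead of issuing fresh mftb reads.

/*
 * Illustrative sketch, not the real caller; assumes kernel context
 * (u64, struct kvm_vcpu, and mftb() from the powerpc headers).
 */
static int enter_guest_once(struct kvm_vcpu *vcpu, u64 time_limit,
			    unsigned long lpcr)
{
	u64 tb = mftb();	/* single timebase read up front */

	/* kvmhv_vcpu_entry_p9() now takes u64 *tb and keeps it current */
	return kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, &tb);
}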
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_p9_entry.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_p9_entry.c  33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index e7793bb806eb..2bd96d8256d1 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -183,13 +183,13 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
}
}
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
s64 hdec, dec;
- u64 tb, purr, spurr;
+ u64 purr, spurr;
u64 *exsave;
bool ri_set;
int trap;
@@ -203,8 +203,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
unsigned long host_dawr1;
unsigned long host_dawrx1;
- tb = mftb();
- hdec = time_limit - tb;
+ hdec = time_limit - *tb;
if (hdec < 0)
return BOOK3S_INTERRUPT_HV_DECREMENTER;
@@ -230,11 +229,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
}
if (vc->tb_offset) {
- u64 new_tb = tb + vc->tb_offset;
+ u64 new_tb = *tb + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
vc->tb_offset_applied = vc->tb_offset;
}
@@ -317,7 +318,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
- mtspr(SPRN_DEC, vcpu->arch.dec_expires - tb);
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_return_to_guest:
@@ -466,15 +467,17 @@ tm_return_to_guest:
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
- tb = mftb();
- vcpu->arch.dec_expires = dec + tb;
+ *tb = mftb();
+ vcpu->arch.dec_expires = dec + *tb;
if (vc->tb_offset_applied) {
- u64 new_tb = tb - vc->tb_offset_applied;
+ u64 new_tb = *tb - vc->tb_offset_applied;
mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
vc->tb_offset_applied = 0;
}
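A note on the TBU40 handling in the hunks above, with a hedged sketch (the helper name below is hypothetical; the kernel open-codes this sequence at both sites): mtspr(SPRN_TBU40, x) rewrites only the upper 40 bits of the timebase, so if the free-running low 24 bits carried past bit 24 after new_tb was computed, the upper bits land one step short and must be bumped by 1 << 24. The rewritten code then keeps new_tb as the canonical value in *tb rather than re-reading mftb().

/*
 * Hypothetical helper mirroring the two open-coded sequences above;
 * assumes mtspr()/mftb() and SPRN_TBU40 from the powerpc headers.
 */
static u64 write_tb_upper40(u64 new_tb)
{
	mtspr(SPRN_TBU40, new_tb);	/* sets only the top 40 bits */

	/* Low 24 bits now smaller than expected: they wrapped past bit 24 */
	if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
		new_tb += 0x1000000;	/* propagate the carry into TBU40 */
		mtspr(SPRN_TBU40, new_tb);
	}
	return new_tb;	/* caller stores this into *tb, avoiding another mftb */
}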