path: root/arch/x86/kvm/pmu.c
author		Like Xu <likexu@tencent.com>	2023-06-03 04:10:57 +0300
committer	Sean Christopherson <seanjc@google.com>	2023-06-07 03:31:44 +0300
commit		4a2771895ca63a055df815be5e307cce8e85308c (patch)
tree		e27fb5669aca01459be7ad6c975ddfec98a91c37 /arch/x86/kvm/pmu.c
parent		fe8d76c1a6f04387953727ee3e63469956e7360d (diff)
download	linux-4a2771895ca63a055df815be5e307cce8e85308c.tar.xz
KVM: x86/svm/pmu: Add AMD PerfMonV2 support
If AMD Performance Monitoring Version 2 (PerfMonV2) is detected by the guest, it can use a new scheme to manage the Core PMCs using the new global control and status registers.

In addition to benefiting from the PerfMonV2 functionality in the same way as the host (higher precision), the guest can also reduce the number of vm-exits by lowering the total number of MSR accesses.

In terms of implementation details, amd_is_valid_msr() is resurrected since the three newly added MSRs could not be mapped to one vPMC. The possibility of emulating PerfMonV2 on the mainframe has also been eliminated for reasons of precision.

Co-developed-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Like Xu <likexu@tencent.com>
[sean: drop "Based on the observed HW." comments]
Link: https://lore.kernel.org/r/20230603011058.1038821-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
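For context, the "new scheme" is the global-control programming model: rather than toggling the enable bit in each per-counter PERF_CTLx MSR individually, a PerfMonV2 guest can start, stop and acknowledge all core PMCs through the three global MSRs emulated by this patch. A minimal guest-side sketch (illustrative only, not part of the patch; it assumes the standard kernel wrmsrl()/rdmsrl() helpers and six core counters):

	u64 status;

	/* Stop every core PMC with a single MSR write. */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/* See which counters overflowed, then acknowledge them. */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);

	/* Re-enable PMC0-PMC5 with one write instead of six. */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, GENMASK_ULL(5, 0));

Under KVM each of these is a single intercepted MSR access, which is where the reduction in vm-exits comes from.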
Diffstat (limited to 'arch/x86/kvm/pmu.c')
-rw-r--r--	arch/x86/kvm/pmu.c	18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 4315f46aabfb..bf653df86112 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -585,11 +585,14 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 		msr_info->data = pmu->global_status;
 		break;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		msr_info->data = pmu->global_ctrl;
 		break;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		msr_info->data = 0;
 		break;
@@ -607,16 +610,28 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u64 data = msr_info->data;
 	u64 diff;
 
+	/*
+	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
+	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
+	 */
 	switch (msr) {
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 		if (!msr_info->host_initiated)
 			return 1; /* RO MSR */
+		fallthrough;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+		/* Per PPR, Read-only MSR. Writes are ignored. */
+		if (!msr_info->host_initiated)
+			break;
 
 		if (data & pmu->global_status_mask)
 			return 1;
 
 		pmu->global_status = data;
 		break;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+		data &= ~pmu->global_ctrl_mask;
+		fallthrough;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (!kvm_valid_perf_global_ctrl(pmu, data))
 			return 1;
@@ -634,7 +649,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 */
 		if (data & pmu->global_status_mask)
 			return 1;
-
+		fallthrough;
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 		if (!msr_info->host_initiated)
 			pmu->global_status &= ~data;
 		break;