author	Sean Christopherson <seanjc@google.com>	2021-08-27 12:25:10 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-09-22 17:33:15 +0300
commit	0bbc2ca8515f9cdf11df84ccb63dc7c44bc3d8f4 (patch)
tree	f28f7a59b2f13d18d3c9038dd82b6ad50b66e761 /virt
parent	85b640450ddcfa09cf72771b69a9c3daf0ddc772 (diff)
KVM: Use cpumask_available() to check for NULL cpumask when kicking vCPUs
Check for a NULL cpumask_var_t when kicking multiple vCPUs via
cpumask_available(), which performs a !NULL check if and only if cpumasks
are configured to be allocated off-stack. This is a meaningless
optimization, e.g. avoids a TEST+Jcc and TEST+CMOV on x86, but more
importantly helps document that the NULL check is necessary even though
all callers pass in a local variable.

No functional change intended.

Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210827092516.1027264-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
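For readers unfamiliar with the config-dependent typedef, here is a minimal
user-space sketch of why cpumask_available() is the right spelling of this
check. The kernel's real definitions live in include/linux/cpumask.h; the
struct layout below is a toy stand-in:

#include <stdbool.h>

struct cpumask { unsigned long bits[4]; };

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack: cpumask_var_t is a pointer and allocation can fail. */
typedef struct cpumask *cpumask_var_t;

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;	/* a genuine NULL check (TEST+Jcc on x86) */
}
#else
/* On-stack: a one-element array decays to a never-NULL pointer. */
typedef struct cpumask cpumask_var_t[1];

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;		/* constant-folds away entirely */
}
#endif

With cpumask_var_t in the signature, the compiler drops the check in the
on-stack configuration, while the off-stack configuration keeps the NULL
check that the fallback path below depends on.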
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	18	
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 490c8cb8cc8d..e95e7a9e4d53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-	if (unlikely(!cpus))
+	const struct cpumask *cpus;
+
+	if (likely(cpumask_available(tmp)))
+		cpus = tmp;
+	else
 		cpus = cpu_online_mask;
 
 	if (cpumask_empty(cpus))
@@ -268,6 +272,14 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 			continue;
 
 		/*
+		 * tmp can be "unavailable" if cpumasks are allocated off stack
+		 * as allocation of the mask is deliberately not fatal and is
+		 * handled by falling back to kicking all online CPUs.
+		 */
+		if (!cpumask_available(tmp))
+			continue;
+
+		/*
 		 * Note, the vCPU could get migrated to a different pCPU at any
 		 * point after kvm_request_needs_ipi(), which could result in
 		 * sending an IPI to the previous pCPU.  But, that's ok because
@@ -278,7 +290,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 		 * were reading SPTEs _before_ any changes were finalized.  See
 		 * kvm_vcpu_kick() for more details on handling requests.
 		 */
-		if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
+		if (kvm_request_needs_ipi(vcpu, req)) {
 			cpu = READ_ONCE(vcpu->cpu);
 			if (cpu != -1 && cpu != me)
 				__cpumask_set_cpu(cpu, tmp);
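The "deliberately not fatal" allocation happens in the caller. A rough
sketch of that pattern follows, using the real zalloc_cpumask_var() and
free_cpumask_var() helpers but a hypothetical wrapper name and a simplified
kvm_make_vcpus_request_mask() signature (the actual caller is
kvm_make_all_cpus_request() in virt/kvm/kvm_main.c):

/*
 * Sketch only, not the exact upstream code: the allocation's return
 * value is intentionally ignored; on failure the mask stays
 * "unavailable" and kvm_kick_many_cpus() falls back to cpu_online_mask.
 */
static bool kvm_request_all_vcpus(struct kvm *kvm, unsigned int req)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, cpus);

	free_cpumask_var(cpus);
	return called;
}

free_cpumask_var() is safe even when the allocation failed (it reduces to
kfree(NULL) off-stack and a no-op on-stack), which is what lets the caller
ignore the allocation's return value in the first place.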