author		Paolo Bonzini <pbonzini@redhat.com>	2019-07-11 16:14:16 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-07-11 16:14:16 +0300
commit		a45ff5994c9cde41af627c46abb9f32beae68943
tree		b726cc506ed1b01484f183ab2679cdd618e1e9b1	/arch/arm64/kvm/hyp/tlb.c
parent		429bb83af8bcea0115eb34fd7ed94a35166d8384
parent		1e0cf16cdad1ba53e9eeee8746fe57de42f20c97
Merge tag 'kvm-arm-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 5.3

- Add support for chained PMU counters in guests
- Improve SError handling
- Handle Neoverse N1 erratum #1349291
- Allow side-channel mitigation status to be migrated
- Standardise most AArch64 system register accesses to msr_s/mrs_s
- Fix host MPIDR corruption on 32bit
Diffstat (limited to 'arch/arm64/kvm/hyp/tlb.c')
-rw-r--r--	arch/arm64/kvm/hyp/tlb.c  |  25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 76c30866069e..d49a14497715 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqflags.h>
@@ -44,12 +33,12 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
* in the TCR_EL1 register. We also need to prevent it to
* allocate IPA->PA walks, so we enable the S1 MMU...
*/
- val = cxt->tcr = read_sysreg_el1(tcr);
+ val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
- write_sysreg_el1(val, tcr);
- val = cxt->sctlr = read_sysreg_el1(sctlr);
+ write_sysreg_el1(val, SYS_TCR);
+ val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
val |= SCTLR_ELx_M;
- write_sysreg_el1(val, sctlr);
+ write_sysreg_el1(val, SYS_SCTLR);
}
/*
@@ -96,8 +85,8 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
/* Restore the registers to what they were */
- write_sysreg_el1(cxt->tcr, tcr);
- write_sysreg_el1(cxt->sctlr, sctlr);
+ write_sysreg_el1(cxt->tcr, SYS_TCR);
+ write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
}
local_irq_restore(cxt->flags);
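
The hunks above implement, for this file, the "Standardise most AArch64 system register accesses to msr_s/mrs_s" item from the merge description: the register tokens passed to read_sysreg_el1()/write_sysreg_el1() move from lowercase names (tcr, sctlr) to the SYS_* form (SYS_TCR, SYS_SCTLR), which the wrappers complete to the _EL1 or _EL12 encoding depending on whether VHE is in use. The sketch below shows the resulting usage pattern in isolation; it is not part of the patch, and the tlb_ctxt structure and helper name are hypothetical stand-ins for the context structure used in this file.

	#include <linux/types.h>
	#include <asm/kvm_hyp.h>	/* read_sysreg_el1() / write_sysreg_el1() */
	#include <asm/sysreg.h>		/* SYS_* encodings, SCTLR_ELx_M */
	#include <asm/pgtable-hwdef.h>	/* TCR_EPD0_MASK, TCR_EPD1_MASK */

	struct tlb_ctxt {		/* hypothetical stand-in */
		u64	tcr;
		u64	sctlr;
	};

	static void __hyp_text sketch_disable_s1_walks(struct tlb_ctxt *cxt)
	{
		u64 val;

		/* SYS_TCR is pasted with _EL1 (non-VHE) or _EL12 (VHE) by the wrapper */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);

		/* Same pattern for SCTLR: save the old value, then set the M bit */
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
	}

The benefit of routing everything through these msr_s/mrs_s-based wrappers is that the same hyp code works whether or not VHE redirects EL1 system register accesses, without per-register special casing.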