author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-19 01:32:31 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-19 01:32:31 +0400
commit		8a5de18239e418fe7b1f36504834689f754d8ccc (patch)
tree		8d05ae77da1d4a8512b6052e2ba23571543666c7 /virt
parent		857b50f5d0eed113428c864e927289d8f5f2b864 (diff)
parent		2df36a5dd6792870bef48f63bfca42055ea5b79c (diff)
Merge tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
Pull second batch of changes for KVM/{arm,arm64} from Marc Zyngier:
 "The most obvious thing is the sizeable MMU changes to support 48bit
  VAs on arm64.

  Summary:
   - support for 48bit IPA and VA (EL2)
   - a number of fixes for devices mapped into guests
   - yet another VGIC fix for BE
   - a fix for CPU hotplug
   - a few compile fixes (disabled VGIC, strict mm checks)"

[ I'm pulling directly from Marc at the request of Paolo Bonzini, whose
  backpack was stolen at Düsseldorf airport and will do new keys and
  rebuild his web of trust.  - Linus ]

* tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm:
  arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
  arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort
  arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE
  arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
  arm/arm64: KVM: map MMIO regions at creation time
  arm64: kvm: define PAGE_S2_DEVICE as read-only by default
  ARM: kvm: define PAGE_S2_DEVICE as read-only by default
  arm/arm64: KVM: add 'writable' parameter to kvm_phys_addr_ioremap
  arm/arm64: KVM: fix potential NULL dereference in user_mem_abort()
  arm/arm64: KVM: use __GFP_ZERO not memset() to get zeroed pages
  ARM: KVM: fix vgic-disabled build
  arm: kvm: fix CPU hotplug
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic-v2.c	24
-rw-r--r--	virt/kvm/arm/vgic.c	21
2 files changed, 21 insertions(+), 24 deletions(-)
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 416baedfc89f..2935405ad22f 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                   struct vgic_lr lr_desc)
 {
         if (!(lr_desc.state & LR_STATE_MASK))
-                __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
 {
-        u64 val;
-
-#if BITS_PER_LONG == 64
-        val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
-        val <<= 32;
-        val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
-        val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
-        return val;
+        return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
 }
 
 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 {
-        u64 val;
-
-#if BITS_PER_LONG == 64
-        val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
-        val <<= 32;
-        val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
-        val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
-        return val;
+        return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 862967852d5a..3aaca49de325 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
         b->shared = NULL;
 }
 
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+        *val = (*val >> 32) | (*val << 32);
+#endif
+        return (unsigned long *)val;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                 int cpuid, u32 offset)
 {
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
                  * active bit.
                  */
                 u64 eisr = vgic_get_eisr(vcpu);
-                unsigned long *eisr_ptr = (unsigned long *)&eisr;
+                unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
                 int lr;
 
                 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
         level_pending = vgic_process_maintenance(vcpu);
         elrsr = vgic_get_elrsr(vcpu);
-        elrsr_ptr = (unsigned long *)&elrsr;
+        elrsr_ptr = u64_to_bitmask(&elrsr);
 
         /* Clear mappings for empty LRs */
         for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
@@ -1899,7 +1913,8 @@ int kvm_vgic_init(struct kvm *kvm)
         }
 
         ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
-                                    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+                                    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
+                                    true);
         if (ret) {
                 kvm_err("Unable to remap VGIC CPU to VCPU\n");
                 goto out;
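
Note on the BE fix above: the new u64_to_bitmask() helper exists because handing a u64 straight to the unsigned long * bitmap walkers only works when bit 0 of the value lands in word 0 of the bitmap, which is not the case on 32-bit big-endian. The following standalone sketch is not part of the patch; it mimics the helper in userspace, using GCC/Clang byte-order and word-size macros in place of CONFIG_CPU_BIG_ENDIAN and BITS_PER_LONG, to show why the halves must be swapped before iterating.

/*
 * Standalone userspace sketch (assumption: GCC/Clang; not kernel code).
 * On 32-bit big-endian, the low 32 bits of a u64 sit in the *second*
 * long-sized word, so the value's bit 0 is not the bitmap's bit 0;
 * swapping the halves first restores the expected word order.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long *u64_to_bitmask(uint64_t *val)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && \
	__SIZEOF_LONG__ == 4
	/* 32-bit BE: swap the 32-bit halves so value bit 0 maps to word 0. */
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}

/* Open-coded test_bit(): pick the word, then the bit within that word. */
static int bitmap_test(const unsigned long *bm, int nr)
{
	return (bm[nr / (8 * (int)sizeof(long))] >>
		(nr % (8 * (int)sizeof(long)))) & 1UL;
}

int main(void)
{
	uint64_t elrsr = 1ULL << 3;	/* pretend list register 3 is empty */
	unsigned long *bm = u64_to_bitmask(&elrsr);

	/* Prints 1 on LE and BE, 32-bit and 64-bit alike. */
	printf("LR3 empty: %d\n", bitmap_test(bm, 3));
	return 0;
}

Without the swap, the same program on a 32-bit big-endian target would report 0, which is exactly the failure mode the kernel helper (guarded by CONFIG_CPU_BIG_ENDIAN && BITS_PER_LONG == 32 in the patch) avoids.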