Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arch_timer.c      | 43
-rw-r--r--  virt/kvm/arm/arm.c             | 44
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c    | 48
-rw-r--r--  virt/kvm/arm/hyp/vgic-v2-sr.c  |  4
-rw-r--r--  virt/kvm/arm/mmio.c            |  6
-rw-r--r--  virt/kvm/arm/mmu.c             | 24
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c  |  8
-rw-r--r--  virt/kvm/arm/vgic/vgic-irqfd.c |  3
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c   |  4
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c    |  2
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c    |  8
-rw-r--r--  virt/kvm/arm/vgic/vgic.c       |  8
-rw-r--r--  virt/kvm/eventfd.c             |  4
-rw-r--r--  virt/kvm/kvm_main.c            | 52
14 files changed, 125 insertions(+), 133 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..cc29a8148328 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
struct arch_timer_context *vtimer;
+ u32 cnt_ctl;
- if (!vcpu) {
- pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
- return IRQ_NONE;
- }
- vtimer = vcpu_vtimer(vcpu);
+ /*
+ * We may see a timer interrupt after vcpu_put() has been called, which
+ * sets the CPU's vcpu pointer to NULL, because even though the timer
+ * has been disabled in vtimer_save_state(), the hardware interrupt
+ * signal may not have been retired from the interrupt controller yet.
+ */
+ if (!vcpu)
+ return IRQ_HANDLED;
+ vtimer = vcpu_vtimer(vcpu);
if (!vtimer->irq.level) {
- vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- if (kvm_timer_irq_can_fire(vtimer))
+ cnt_ctl = read_sysreg_el0(cntv_ctl);
+ cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+ ARCH_TIMER_CTRL_IT_MASK;
+ if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
kvm_timer_update_irq(vcpu, true, vtimer);
}
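
The rewritten check reads CNTV_CTL once and open-codes the can-fire test on that local copy, so the handler no longer writes back into the vtimer context from IRQ context. A minimal sketch of the predicate, in plain C with stand-in constants (assumed to mirror the architectural ENABLE/IMASK/ISTATUS bits, not taken from kernel headers):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in values mirroring the architectural CNTV_CTL_EL0 bits. */
    #define ARCH_TIMER_CTRL_ENABLE  (1u << 0)
    #define ARCH_TIMER_CTRL_IT_MASK (1u << 1)
    #define ARCH_TIMER_CTRL_IT_STAT (1u << 2)

    /* The timer output is asserted only when the timer is enabled, the
     * timer condition has fired (ISTATUS), and the output is unmasked. */
    static bool vtimer_can_fire(uint32_t cnt_ctl)
    {
        cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
                   ARCH_TIMER_CTRL_IT_MASK;
        return cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT);
    }
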
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
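+ /* Make sure the write above has taken effect before we go on */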
+ isb();
vtimer->loaded = false;
out:
@@ -479,9 +487,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
vtimer_restore_state(vcpu);
- if (has_vhe())
- disable_el1_phys_timer_access();
-
/* Set the background timer for the physical timer emulation. */
phys_timer_emulate(vcpu);
}
@@ -510,9 +515,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
if (unlikely(!timer->enabled))
return;
- if (has_vhe())
- enable_el1_phys_timer_access();
-
vtimer_save_state(vcpu);
/*
@@ -726,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
return 0;
}
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
{
struct arch_timer_kvm_info *info;
int err;
@@ -762,10 +764,13 @@ int kvm_timer_hyp_init(void)
return err;
}
- err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
- if (err) {
- kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
- goto out_free_irq;
+ if (has_gic) {
+ err = irq_set_vcpu_affinity(host_vtimer_irq,
+ kvm_get_running_vcpus());
+ if (err) {
+ kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+ goto out_free_irq;
+ }
}
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -841,7 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
no_vgic:
preempt_disable();
timer->enabled = 1;
- kvm_timer_vcpu_load_vgic(vcpu);
+ kvm_timer_vcpu_load(vcpu);
preempt_enable();
return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a67c106d73f5..08464b2fba1d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -53,8 +53,8 @@
__asm__(".arch_extension virt");
#endif
+DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
}
}
+ atomic_set(&kvm->online_vcpus, 0);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_caches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
- kvm_vgic_vcpu_destroy(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -354,7 +354,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
kvm_arm_set_running_vcpu(vcpu);
kvm_vgic_load(vcpu);
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
spin_unlock(&kvm_vmid_lock);
}
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_handle_mmio_return(vcpu, vcpu->run);
if (ret)
return ret;
+ if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+ return 0;
+
}
if (run->immediate_exit)
@@ -701,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
+ if (has_vhe())
+ kvm_arm_vhe_guest_enter();
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+ if (has_vhe())
+ kvm_arm_vhe_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
/*
@@ -756,6 +763,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, run, ret);
+
preempt_enable();
ret = handle_exit(vcpu, run, ret);
@@ -1155,7 +1165,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)kvm_get_hyp_vector();
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
@@ -1269,19 +1279,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
static int init_common_resources(void)
{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1323,7 +1322,7 @@ static int init_subsystems(void)
/*
* Init HYP architected timer support
*/
- err = kvm_timer_hyp_init();
+ err = kvm_timer_hyp_init(vgic_present);
if (err)
goto out;
@@ -1400,6 +1399,12 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = kvm_map_vectors();
+ if (err) {
+ kvm_err("Cannot map vectors\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
@@ -1417,7 +1422,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
@@ -1502,7 +1507,7 @@ int kvm_arch_init(void *opaque)
bool in_hyp_mode;
if (!is_hyp_mode_available()) {
- kvm_err("HYP mode not available\n");
+ kvm_info("HYP mode not available\n");
return -ENODEV;
}
@@ -1541,7 +1546,6 @@ out_hyp:
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:
- teardown_common_resources();
return err;
}
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
write_sysreg(cntvoff, cntvoff_el2);
}
-void __hyp_text enable_el1_phys_timer_access(void)
-{
- u64 val;
-
- /* Allow physical timer/counter access for the host */
- val = read_sysreg(cnthctl_el2);
- val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
- write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
- u64 val;
-
- /*
- * Disallow physical timer access for the guest
- * Physical counter access is allowed
- */
- val = read_sysreg(cnthctl_el2);
- val &= ~CNTHCTL_EL1PCEN;
- val |= CNTHCTL_EL1PCTEN;
- write_sysreg(val, cnthctl_el2);
-}
-
void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
{
/*
* We don't need to do this for VHE since the host kernel runs in EL2
* with HCR_EL2.TGE == 1, which makes those bits have no impact.
*/
- if (!has_vhe())
- enable_el1_phys_timer_access();
+ if (!has_vhe()) {
+ u64 val;
+
+ /* Allow physical timer/counter access for the host */
+ val = read_sysreg(cnthctl_el2);
+ val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ write_sysreg(val, cnthctl_el2);
+ }
}
void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
{
- if (!has_vhe())
- disable_el1_phys_timer_access();
+ if (!has_vhe()) {
+ u64 val;
+
+ /*
+ * Disallow physical timer access for the guest
+ * Physical counter access is allowed
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~CNTHCTL_EL1PCEN;
+ val |= CNTHCTL_EL1PCTEN;
+ write_sysreg(val, cnthctl_el2);
+ }
}
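
With the helpers gone, each trap-configuration path adjusts CNTHCTL_EL2 directly, and the two transitions reduce to bit twiddling on one value. A hedged sketch (bit positions assume the non-VHE layout, where EL1PCTEN is bit 0 and EL1PCEN is bit 1):

    #include <stdint.h>

    #define CNTHCTL_EL1PCTEN (1u << 0)  /* EL1/EL0 physical counter access */
    #define CNTHCTL_EL1PCEN  (1u << 1)  /* EL1/EL0 physical timer access */

    /* Entering the host: let EL1 use both the physical counter and timer. */
    static uint32_t cnthctl_host(uint32_t val)
    {
        return val | CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
    }

    /* Entering the guest: trap the physical timer, keep the counter. */
    static uint32_t cnthctl_guest(uint32_t val)
    {
        return (val & ~CNTHCTL_EL1PCEN) | CNTHCTL_EL1PCTEN;
    }
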
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
else
elrsr1 = 0;
-#ifdef CONFIG_CPU_BIG_ENDIAN
- cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
}
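
The #ifdef could go because the MMIO accessors already return native-endian values; composing two 32-bit register values into a 64-bit one with shifts is independent of byte order. A one-function sketch (hypothetical helper name):

    #include <stdint.h>

    /* elrsr0 covers list registers 0-31, elrsr1 covers 32-63; shifts
     * operate on values, not bytes, so no endianness case is needed. */
    static uint64_t combine_elrsr(uint32_t elrsr0, uint32_t elrsr1)
    {
        return ((uint64_t)elrsr1 << 32) | elrsr0;
    }
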
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index b6e715fd3c90..dac7ceb1a677 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
- data);
+ &data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
len);
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
kvm_mmio_write_buf(data_buf, len, data);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
} else {
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
- fault_ipa, 0);
+ fault_ipa, NULL);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
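
Passing &data (or NULL for a not-yet-satisfied read) lets the tracepoint copy only the access's real length instead of consuming a fixed-width value, which is what caused the out-of-bounds read this change addresses. A rough model of the calling convention (stand-in function, not the kernel tracepoint):

    #include <stdio.h>
    #include <string.h>

    static void trace_mmio(unsigned int len, unsigned long addr,
                           const void *val)
    {
        unsigned long data = 0;

        if (val)  /* NULL means the data does not exist yet */
            memcpy(&data, val, len < sizeof(data) ? len : sizeof(data));
        printf("mmio addr=%#lx len=%u data=%#lx%s\n",
               addr, len, data, val ? "" : " (unsatisfied)");
    }
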
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b36945d49986..f8eaf86b740a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
*/
void free_hyp_pgds(void)
{
- unsigned long addr;
-
mutex_lock(&kvm_hyp_pgd_mutex);
if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
if (hyp_pgd) {
unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
- for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+ (uintptr_t)high_memory - PAGE_OFFSET);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+ VMALLOC_END - VMALLOC_START);
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL;
@@ -623,7 +621,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
return 0;
}
-static int __create_hyp_mappings(pgd_t *pgdp,
+static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
@@ -636,7 +634,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
addr = start & PAGE_MASK;
end = PAGE_ALIGN(end);
do {
- pgd = pgdp + pgd_index(addr);
+ pgd = pgdp + ((addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1));
if (pgd_none(*pgd)) {
pud = pud_alloc_one(NULL, addr);
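
Taking ptrs_per_pgd as a parameter decouples the walker from the host's compile-time PTRS_PER_PGD, so the same code can index an idmap PGD of a different size. The index computation itself is just a shift and mask; a sketch with assumed stand-in values:

    #include <stdint.h>

    #define PGDIR_SHIFT 30  /* assumption: each PGD entry maps 1 GiB */

    /* ptrs_per_pgd must be a power of two for the mask to be exact. */
    static uint64_t pgd_slot(uint64_t addr, uint64_t ptrs_per_pgd)
    {
        return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
    }
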
@@ -699,8 +697,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
int err;
phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
+ err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
+ virt_addr, virt_addr + PAGE_SIZE,
__phys_to_pfn(phys_addr),
prot);
if (err)
@@ -731,7 +729,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
- return __create_hyp_mappings(hyp_pgd, start, end,
+ return __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end,
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
@@ -1312,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma) && !logging_active) {
+ if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
hugetlb = true;
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
@@ -1737,7 +1735,7 @@ static int kvm_map_idmap_text(pgd_t *pgd)
int err;
/* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
+ err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
hyp_idmap_start, hyp_idmap_end,
__phys_to_pfn(hyp_idmap_start),
PAGE_HYP_EXEC);
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
- ret = vgic_v4_init(kvm);
- if (ret)
- goto out;
+ if (vgic_has_its(kvm)) {
+ ret = vgic_v4_init(kvm);
+ if (ret)
+ goto out;
+ }
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
u32 nr = dist->nr_spis;
int i, ret;
- entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
- GFP_KERNEL);
+ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
u32 *intids;
int nr_irqs, i;
unsigned long flags;
+ u8 pendmask;
nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
for (i = 0; i < nr_irqs; i++) {
int byte_offset, bit_nr;
- u8 pendmask;
byte_offset = intids[i] / BITS_PER_BYTE;
bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
return E_ITS_MAPC_COLLECTION_OOR;
collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+ if (!collection)
+ return -ENOMEM;
collection->collection_id = coll_id;
collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
int last_byte_offset = -1;
struct vgic_irq *irq;
int ret;
+ u8 val;
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
int byte_offset, bit_nr;
struct kvm_vcpu *vcpu;
gpa_t pendbase, ptr;
bool stored;
- u8 val;
vcpu = irq->target_vcpu;
if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int i, nr_vcpus, ret;
- if (!vgic_supports_direct_msis(kvm))
+ if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
if (dist->its_vm.vpes)
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
goto out;
WARN_ON(!(irq->hw && irq->host_irq == virq));
- irq->hw = false;
- ret = its_unmap_vlpi(virq);
+ if (irq->hw) {
+ irq->hw = false;
+ ret = its_unmap_vlpi(virq);
+ }
out:
mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
struct vgic_irq *irq;
+ unsigned long flags;
int ret = 0;
if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
return -EINVAL;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->owner && irq->owner != owner)
ret = -EEXIST;
else
irq->owner = owner;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
return ret;
}
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ struct vgic_irq *irq;
bool map_is_active;
unsigned long flags;
if (!vgic_initialized(vcpu->kvm))
return false;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active;
spin_unlock_irqrestore(&irq->irq_lock, flags);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f2ac53ab8243..a334399fafec 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -188,7 +188,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
@@ -287,7 +287,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
- unsigned int events;
+ __poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 96689967f5c3..001085b611ad 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
+{
+}
+
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
+
+ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
srcu_read_unlock(&kvm->srcu, idx);
}
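
The __weak stub above gives every architecture a default no-op for the new hook; an architecture that needs the callback defines a strong symbol with the same name and the linker picks it. A toy, out-of-kernel illustration of the pattern (made-up names, GCC/Clang attribute syntax):

    #include <stdio.h>

    /* Weak default: used only if no strong definition is linked in. */
    __attribute__((weak)) void arch_invalidate_hook(void)
    {
        /* no-op by default */
    }

    int main(void)
    {
        arch_invalidate_hook();
        puts("hook returned");
        return 0;
    }
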
@@ -468,6 +476,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -1314,17 +1323,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int get_user_page_nowait(unsigned long start, int write,
- struct page **page)
-{
- int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
- if (write)
- flags |= FOLL_WRITE;
-
- return get_user_pages(start, 1, flags, page, NULL);
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1373,7 +1371,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ unsigned int flags = FOLL_HWPOISON;
+ struct page *page;
int npages = 0;
might_sleep();
@@ -1381,35 +1380,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (writable)
*writable = write_fault;
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else {
- unsigned int flags = FOLL_HWPOISON;
-
- if (write_fault)
- flags |= FOLL_WRITE;
+ if (write_fault)
+ flags |= FOLL_WRITE;
+ if (async)
+ flags |= FOLL_NOWAIT;
- npages = get_user_pages_unlocked(addr, 1, page, flags);
- }
+ npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
- struct page *wpage[1];
+ struct page *wpage;
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
+ if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
+ put_page(page);
+ page = wpage;
}
-
- npages = 1;
}
- *pfn = page_to_pfn(page[0]);
+ *pfn = page_to_pfn(page);
return npages;
}
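
After this cleanup, the async, write-fault, and hwpoison cases all feed one flag word into a single get_user_pages_unlocked() call, with FOLL_NOWAIT replacing the old locked get_user_page_nowait() path. A compact model of the flag assembly (FOLL_* values are stand-ins, not the kernel's definitions):

    #include <stdbool.h>

    #define FOLL_WRITE    0x001
    #define FOLL_NOWAIT   0x020
    #define FOLL_HWPOISON 0x100

    static unsigned int gup_flags(bool write_fault, bool async)
    {
        unsigned int flags = FOLL_HWPOISON;

        if (write_fault)
            flags |= FOLL_WRITE;
        if (async)
            flags |= FOLL_NOWAIT;  /* fail fast rather than sleep on I/O */
        return flags;
    }
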