author	Alexey Kardashevskiy <aik@ozlabs.ru>	2018-09-10 11:29:09 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-11-24 10:19:43 +0300
commit	5a6f7274e67bcbcebbf8423d8ed56666563b46bf (patch)
tree	bc1dad797e55582f0cbb92330c0895ba45cb1d6b /arch/powerpc/kvm
parent	38374aa3c9166e6d44363e400fb889ba6b8282ab (diff)
download	linux-5a6f7274e67bcbcebbf8423d8ed56666563b46bf.tar.xz
KVM: PPC: Inform the userspace about TCE update failures
[ Upstream commit f7960e299f13f069d6f3d4e157d91bfca2669677 ]

We return H_TOO_HARD from TCE update handlers when we think that the
next handler (realmode -> virtual mode -> user mode) has a chance to
handle the request; H_HARDWARE/H_CLOSED otherwise.

This changes the handlers to return H_TOO_HARD on every error giving
the userspace an opportunity to handle any request or at least log
them all.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
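For context: per the commit message, H_TOO_HARD is the KVM convention for "let the next handler in the chain try", with requests cascading from the real-mode handler to the virtual-mode handler and finally out to user space (QEMU). The standalone C sketch below is not part of the patch; the stage functions and the numeric constant values are made up for illustration, and only the H_* names mirror the hypercall codes. It shows why returning H_TOO_HARD instead of H_HARDWARE/H_CLOSED lets a failed TCE update reach user space, where it can be handled or at least logged:

#include <stdio.h>

/* Hypercall-style return codes; the numeric values here are
 * illustrative, not the kernel's definitions. */
#define H_SUCCESS   0L
#define H_TOO_HARD  9999L   /* "let the next handler in the chain try" */
#define H_HARDWARE  (-1L)   /* hard failure, nobody retries            */

/* Hypothetical stand-ins for the real-mode and virtual-mode TCE
 * update paths touched by this patch; both defer rather than fail
 * hard. */
static long tce_update_realmode(void) { return H_TOO_HARD; }
static long tce_update_virtmode(void) { return H_TOO_HARD; }

static long tce_update(void)
{
    long ret;

    ret = tce_update_realmode();
    if (ret != H_TOO_HARD)
        return ret;             /* handled (or failed) in real mode */

    ret = tce_update_virtmode();
    if (ret != H_TOO_HARD)
        return ret;             /* handled (or failed) in virtual mode */

    /* Both in-kernel stages deferred: exit to user space (QEMU),
     * which can emulate the update or at least log the failure. */
    printf("TCE update deferred to user space\n");
    return H_SUCCESS;
}

int main(void)
{
    return tce_update() == H_SUCCESS ? 0 : 1;
}

Before this change, a failure in e.g. mm_iommu_ua_to_hpa() or mm_iommu_mapped_inc() returned H_HARDWARE or H_CLOSED from the first stage, so the request was terminated inside the kernel and user space never saw it.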
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_64_vio.c	8
-rw-r--r--	arch/powerpc/kvm/book3s_64_vio_hv.c	6
2 files changed, 7 insertions, 7 deletions
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 07a8004c3c23..65486c3d029b 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -401,7 +401,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
 	long ret;
 
 	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (dir == DMA_NONE)
 		return H_SUCCESS;
@@ -449,15 +449,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		return H_TOO_HARD;
 
 	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (mm_iommu_mapped_inc(mem))
-		return H_CLOSED;
+		return H_TOO_HARD;
 
 	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
 	if (WARN_ON_ONCE(ret)) {
 		mm_iommu_mapped_dec(mem);
-		return H_HARDWARE;
+		return H_TOO_HARD;
 	}
 
 	if (dir != DMA_NONE)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index eb8b11515a7f..d258ed4ef77c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 
 	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
 			&hpa)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
-		return H_CLOSED;
+		return H_TOO_HARD;
 
 	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
@@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
 		rmap = (void *) vmalloc_to_phys(rmap);
 		if (WARN_ON_ONCE_RM(!rmap))
-			return H_HARDWARE;
+			return H_TOO_HARD;
 
 		/*
 		 * Synchronize with the MMU notifier callbacks in