From faa2f72cb3569256480c5540d242c84e99965160 Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Tue, 28 Jun 2022 15:56:02 +0200
Subject: KVM: s390: pv: leak the topmost page table when destroy fails

Each secure guest must have a unique ASCE (address space control
element); we must prevent new guests from using the same page for their
ASCE, otherwise errors will occur. Since the ASCE mostly consists of the
address of the topmost page table (plus some flags), we must not return
that memory to the pool unless the ASCE is no longer in use.

Only a successful Destroy Secure Configuration UVC will make the ASCE
reusable again. If the Destroy Secure Configuration UVC fails, the page
cannot be reused for a secure guest (either as an ASCE or for other
memory areas). To avoid a collision, it must not be used again. This is
a permanent error and the page becomes in practice unusable, so we set
it aside and leak it.

On failure we already leak other memory that belongs to the ultravisor
(i.e. the variable and base storage for a guest); not leaking the
topmost page table was an oversight. This error (and thus the leakage)
should not happen unless the hardware is broken or KVM has some unknown
serious bug.

Signed-off-by: Claudio Imbrenda
Fixes: 29b40f105ec8d55 ("KVM: s390: protvirt: Add initial vm and cpu lifecycle handling")
Reviewed-by: Janosch Frank
Link: https://lore.kernel.org/r/20220628135619.32410-2-imbrenda@linux.ibm.com
Message-Id: <20220628135619.32410-2-imbrenda@linux.ibm.com>
Signed-off-by: Janosch Frank
---
 arch/s390/mm/gmap.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
(limited to 'arch/s390/mm')

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b8ae4a4aa2ba..85cab61d87a9 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2735,3 +2735,89 @@ void s390_reset_acc(struct mm_struct *mm)
 	mmput(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_acc);
+
+/**
+ * s390_unlist_old_asce - Remove the topmost level of page tables from the
+ * list of page tables of the gmap.
+ * @gmap: the gmap whose table is to be removed
+ *
+ * On s390x, KVM keeps a list of all pages containing the page tables of the
+ * gmap (the CRST list). This list is used at tear down time to free all
+ * pages that are no longer needed.
+ *
+ * This function removes the topmost page of the tree (the one pointed to by
+ * the ASCE) from the CRST list.
+ *
+ * This means that it will not be freed when the VM is torn down, and needs
+ * to be handled separately by the caller, unless a leak is actually
+ * intended. Note that this function only removes the page from the
+ * list; the page will still be used as a top level page table (and ASCE).
+ */
+void s390_unlist_old_asce(struct gmap *gmap)
+{
+	struct page *old;
+
+	old = virt_to_page(gmap->table);
+	spin_lock(&gmap->guest_table_lock);
+	list_del(&old->lru);
+	/*
+	 * Sometimes the topmost page might need to be "removed" multiple
+	 * times, for example if the VM is rebooted into secure mode several
+	 * times concurrently, or if s390_replace_asce fails after calling
+	 * s390_unlist_old_asce and is attempted again later. In that case
+	 * the old ASCE has been removed from the list, and therefore it
+	 * will not be freed when the VM terminates, but the ASCE is still
+	 * in use and still pointed to.
+	 * A subsequent call to s390_replace_asce will follow the pointer and
+	 * try to remove the same page from the list again.
+	 * Therefore it's necessary that the page of the ASCE has valid
+	 * pointers, so list_del can work (and do nothing) without
+	 * dereferencing stale or invalid pointers.
+	 */
+	INIT_LIST_HEAD(&old->lru);
+	spin_unlock(&gmap->guest_table_lock);
+}
+EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
+
+/**
+ * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
+ * @gmap: the gmap whose ASCE needs to be replaced
+ *
+ * If the allocation of the new top level page table fails, the ASCE is not
+ * replaced.
+ * In any case, the old ASCE is always removed from the gmap CRST list.
+ * Therefore the caller has to make sure to save a pointer to it
+ * beforehand, unless a leak is actually intended.
+ */
+int s390_replace_asce(struct gmap *gmap)
+{
+	unsigned long asce;
+	struct page *page;
+	void *table;
+
+	s390_unlist_old_asce(gmap);
+
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	if (!page)
+		return -ENOMEM;
+	table = page_to_virt(page);
+	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
+
+	/*
+	 * The caller has to deal with the old ASCE, but here we make sure
+	 * the new one is properly added to the CRST list, so that
+	 * it will be freed when the VM is torn down.
+	 */
+	spin_lock(&gmap->guest_table_lock);
+	list_add(&page->lru, &gmap->crst_list);
+	spin_unlock(&gmap->guest_table_lock);
+
+	/* Set new table origin while preserving existing ASCE control bits */
+	asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
+	WRITE_ONCE(gmap->asce, asce);
+	WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
+	WRITE_ONCE(gmap->table, table);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s390_replace_asce);
-- cgit v1.2.3

From a52c25848e3143fbced80e6835de4034cc461fec Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Tue, 28 Jun 2022 15:56:03 +0200
Subject: KVM: s390: pv: handle secure storage violations for protected guests

A secure storage violation is triggered when a protected guest tries to
access secure memory that has been mapped erroneously, or that belongs
to a different protected guest or to the ultravisor.

With upcoming patches, protected guests will be able to trigger secure
storage violations in normal operation. This happens for example if a
protected guest is rebooted with deferred destroy enabled and the new
guest is also protected. When the new protected guest touches pages
that have not yet been destroyed, and thus are accounted to the
previous protected guest, a secure storage violation is raised.

This patch adds handling of secure storage violations for protected
guests. This exception is handled by first trying to destroy the page,
because it is expected to belong to a defunct protected guest where a
destroy should be possible. If that fails, a normal export of the page
is attempted. Note that a secure page can only be destroyed if its
protected VM does not have any CPUs, which only happens when the
protected VM is being terminated.

This means that pages that trigger the exception will be made
non-secure (in one way or another) before attempting to use them again
for a different secure guest.
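The destroy-then-export fallback described above boils down to the
following minimal sketch (an illustration only, not the exact patch
hunks; the helper name is hypothetical, while uv_destroy_owned_page()
and uv_convert_owned_from_secure() are the existing conversion
primitives declared in asm/uv.h):

	/* sketch: make a secure page non-secure, one way or another */
	static int make_page_non_secure_sketch(struct page *page)
	{
		/* destroy only succeeds while the owning PV guest has no CPUs */
		int rc = uv_destroy_owned_page(page_to_phys(page));

		/* the page is still in use by a live PV guest: export it instead */
		if (rc)
			rc = uv_convert_owned_from_secure(page_to_phys(page));
		return rc;
	}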
Signed-off-by: Claudio Imbrenda
Acked-by: Janosch Frank
Link: https://lore.kernel.org/r/20220628135619.32410-3-imbrenda@linux.ibm.com
Message-Id: <20220628135619.32410-3-imbrenda@linux.ibm.com>
Signed-off-by: Janosch Frank
---
 arch/s390/include/asm/uv.h |  1 +
 arch/s390/kernel/uv.c      | 55 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/s390/mm/fault.c       | 10 +++++++++
 3 files changed, 66 insertions(+)
(limited to 'arch/s390/mm')

diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 18fe04c8547e..be3ef9dd6972 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -426,6 +426,7 @@ static inline int is_prot_virt_host(void)
 }
 
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
 int uv_destroy_owned_page(unsigned long paddr);
 int uv_convert_from_secure(unsigned long paddr);
 int uv_convert_owned_from_secure(unsigned long paddr);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index c13d5a7b71f0..4c91a3dbc05b 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -334,6 +334,61 @@ int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
 }
 EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
 
+/**
+ * gmap_destroy_page - Destroy a guest page.
+ * @gmap: the gmap of the guest
+ * @gaddr: the guest address to destroy
+ *
+ * An attempt is made to destroy the given guest page. If that fails, the
+ * page is exported instead. If both attempts fail, an appropriate error
+ * is returned.
+ */
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
+{
+	struct vm_area_struct *vma;
+	unsigned long uaddr;
+	struct page *page;
+	int rc;
+
+	rc = -EFAULT;
+	mmap_read_lock(gmap->mm);
+
+	uaddr = __gmap_translate(gmap, gaddr);
+	if (IS_ERR_VALUE(uaddr))
+		goto out;
+	vma = vma_lookup(gmap->mm, uaddr);
+	if (!vma)
+		goto out;
+	/*
+	 * Huge pages should not be able to become secure
+	 */
+	if (is_vm_hugetlb_page(vma))
+		goto out;
+
+	rc = 0;
+	/* we take an extra reference here */
+	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
+	if (IS_ERR_OR_NULL(page))
+		goto out;
+	rc = uv_destroy_owned_page(page_to_phys(page));
+	/*
+	 * Fault handlers can race; it is possible that two CPUs will fault
+	 * on the same secure page. One CPU can destroy the page, reboot,
+	 * re-enter secure mode and import it, while the second CPU was
+	 * stuck at the beginning of the handler. At some point the second
+	 * CPU will be able to progress, and it will not be able to destroy
+	 * the page. In that case we do not want to terminate the process;
+	 * we instead try to export the page.
+	 */
+	if (rc)
+		rc = uv_convert_owned_from_secure(page_to_phys(page));
+	put_page(page);
+out:
+	mmap_read_unlock(gmap->mm);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_destroy_page);
+
 /*
  * To be called with the page locked or with an extra reference! This will
  * prevent gmap_make_secure from touching the page concurrently. Having 2
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e173b6187ad5..af1ac49168fb 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -837,6 +837,16 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);
 
 void do_secure_storage_violation(struct pt_regs *regs)
 {
+	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+	/*
+	 * If the VM has been rebooted, its address space might still contain
+	 * secure pages from the previous boot.
+	 * Clear the page so it can be reused.
+	 */
+	if (!gmap_destroy_page(gmap, gaddr))
+		return;
 	/*
 	 * Either KVM messed up the secure guest mapping or the same
 	 * page is mapped into multiple secure guests.
-- cgit v1.2.3

From b108f7f0a29b24a94e0c4f9af20f84afaa6ef245 Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Tue, 28 Jun 2022 15:56:04 +0200
Subject: KVM: s390: pv: handle secure storage exceptions for normal guests

With upcoming patches, normal guests might touch secure pages. This
patch extends the existing exception handler so that it also converts
the pages to non-secure when the exception is triggered by a normal
guest.

This can happen for example when a secure guest reboots; the first
stage of a secure guest is non-secure, and in general a secure guest
can reboot into non-secure mode. If the secure memory of the previous
boot has not been completely cleaned up yet (which will be allowed to
happen in an upcoming patch), a non-secure guest might touch secure
memory, which will need to be handled properly.

This means that gmap faults must be handled and not cause termination
of the process. The handling is the same as for userspace accesses:
it's enough to translate the gmap address to a user address and then
let the normal user fault code handle it.

Signed-off-by: Claudio Imbrenda
Reviewed-by: Janosch Frank
Link: https://lore.kernel.org/r/20220628135619.32410-4-imbrenda@linux.ibm.com
Message-Id: <20220628135619.32410-4-imbrenda@linux.ibm.com>
Signed-off-by: Janosch Frank
---
 arch/s390/mm/fault.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
(limited to 'arch/s390/mm')

diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index af1ac49168fb..ee7871f770fb 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -754,6 +754,7 @@ void do_secure_storage_access(struct pt_regs *regs)
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	struct page *page;
+	struct gmap *gmap;
 	int rc;
 
 	/*
@@ -783,6 +784,17 @@ void do_secure_storage_access(struct pt_regs *regs)
 	}
 
 	switch (get_fault_type(regs)) {
+	case GMAP_FAULT:
+		mm = current->mm;
+		gmap = (struct gmap *)S390_lowcore.gmap;
+		mmap_read_lock(mm);
+		addr = __gmap_translate(gmap, addr);
+		mmap_read_unlock(mm);
+		if (IS_ERR_VALUE(addr)) {
+			do_fault_error(regs, VM_ACCESS_FLAGS, VM_FAULT_BADMAP);
+			break;
+		}
+		fallthrough;
 	case USER_FAULT:
 		mm = current->mm;
 		mmap_read_lock(mm);
@@ -811,7 +823,6 @@ void do_secure_storage_access(struct pt_regs *regs)
 		if (rc)
 			BUG();
 		break;
-	case GMAP_FAULT:
 	default:
 		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
 		WARN_ON_ONCE(1);
-- cgit v1.2.3

From 6f73517d0a99ba8ec972bf053456f1c2932598c0 Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Tue, 28 Jun 2022 15:56:05 +0200
Subject: KVM: s390: pv: refactor s390_reset_acc

Refactor s390_reset_acc so that it can be reused in upcoming patches.

We don't want to hold all the locks used in a walk_page_range for too
long, and the destroy page UVC does take some time to complete.
Therefore we quickly gather the pages to destroy, and then destroy them
without holding all the locks.

The refactored function can optionally return early, without completing
the walk, if a fatal signal is pending; in that case it returns an
appropriate error code. Two wrappers are provided to call the new
function.
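For illustration, the intended use of the two wrappers looks roughly
like this (a sketch only: the teardown and killable contexts are
hypothetical, while the calls match the interface added below):

	/* VM teardown must complete, so use the non-interruptible wrapper */
	s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);

	/* a killable context can back off when a fatal signal arrives */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE) == -EINTR)
		return -EINTR;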
Signed-off-by: Claudio Imbrenda
Reviewed-by: Janosch Frank
Reviewed-by: Nico Boehr
Link: https://lore.kernel.org/r/20220628135619.32410-5-imbrenda@linux.ibm.com
Message-Id: <20220628135619.32410-5-imbrenda@linux.ibm.com>
Signed-off-by: Janosch Frank
---
 arch/s390/include/asm/gmap.h | 37 ++++++++++++++++-
 arch/s390/kvm/pv.c           | 12 +++++-
 arch/s390/mm/gmap.c          | 99 +++++++++++++++++++++++++++++++-------------
 3 files changed, 116 insertions(+), 32 deletions(-)
(limited to 'arch/s390/mm')

diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index f4073106e1f3..5cc46e0dde62 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -147,7 +147,42 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
 			     unsigned long gaddr, unsigned long vmaddr);
 int gmap_mark_unmergeable(void);
-void s390_reset_acc(struct mm_struct *mm);
 void s390_unlist_old_asce(struct gmap *gmap);
 int s390_replace_asce(struct gmap *gmap);
+void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
+int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+			    unsigned long end, bool interruptible);
+
+/**
+ * s390_uv_destroy_range - Destroy a range of pages in the given mm.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function will call cond_resched, so it should not generate stalls, but
+ * it will otherwise only return once it has completed.
+ */
+static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+					 unsigned long end)
+{
+	(void)__s390_uv_destroy_range(mm, start, end, false);
+}
+
+/**
+ * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
+ * given mm, but stop when a fatal signal is received.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function will call cond_resched, so it should not generate stalls. If
+ * a fatal signal is received, it will return with -EINTR immediately,
+ * without having destroyed the whole range. Upon successful
+ * completion, 0 is returned.
+ */
+static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
+						      unsigned long end)
+{
+	return __s390_uv_destroy_range(mm, start, end, true);
+}
 #endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index f1733812a263..a556db3912a1 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -13,6 +13,8 @@
 #include
 #include
 #include
+#include
+#include
 #include "kvm-s390.h"
 
 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
@@ -153,8 +155,14 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 {
 	int cc;
 
-	/* make all pages accessible before destroying the guest */
-	s390_reset_acc(kvm->mm);
+	/*
+	 * if the mm still has a mapping, make all its pages accessible
+	 * before destroying the guest
+	 */
+	if (mmget_not_zero(kvm->mm)) {
+		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
+		mmput(kvm->mm);
+	}
 
 	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
 			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 85cab61d87a9..62758cb5872f 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2697,44 +2697,85 @@ void s390_reset_cmma(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
 
-/*
- * make inaccessible pages accessible again
- */
-static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
-			    unsigned long next, struct mm_walk *walk)
+#define GATHER_GET_PAGES 32
+
+struct reset_walk_state {
+	unsigned long next;
+	unsigned long count;
+	unsigned long pfns[GATHER_GET_PAGES];
+};
+
+static int s390_gather_pages(pte_t *ptep, unsigned long addr,
+			     unsigned long next, struct mm_walk *walk)
 {
+	struct reset_walk_state *p = walk->private;
 	pte_t pte = READ_ONCE(*ptep);
 
-	/* There is a reference through the mapping */
-	if (pte_present(pte))
-		WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK));
-
-	return 0;
+	if (pte_present(pte)) {
+		/* we have a reference from the mapping, take an extra one */
+		get_page(phys_to_page(pte_val(pte)));
+		p->pfns[p->count] = phys_to_pfn(pte_val(pte));
+		p->next = next;
+		p->count++;
+	}
+	return p->count >= GATHER_GET_PAGES;
 }
 
-static const struct mm_walk_ops reset_acc_walk_ops = {
-	.pte_entry = __s390_reset_acc,
+static const struct mm_walk_ops gather_pages_ops = {
+	.pte_entry = s390_gather_pages,
 };
 
-#include
-void s390_reset_acc(struct mm_struct *mm)
+/*
+ * Call the Destroy secure page UVC on each page in the given array of PFNs.
+ * Each page needs to have an extra reference, which will be released here.
+ */
+void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
 {
-	if (!mm_is_protected(mm))
-		return;
-	/*
-	 * we might be called during
-	 * reset: we walk the pages and clear
-	 * close of all kvm file descriptors: we walk the pages and clear
-	 * exit of process on fd closure: vma already gone, do nothing
-	 */
-	if (!mmget_not_zero(mm))
-		return;
-	mmap_read_lock(mm);
-	walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
-	mmap_read_unlock(mm);
-	mmput(mm);
+	unsigned long i;
+
+	for (i = 0; i < count; i++) {
+		/* we always have an extra reference */
+		uv_destroy_owned_page(pfn_to_phys(pfns[i]));
+		/* get rid of the extra reference */
+		put_page(pfn_to_page(pfns[i]));
+		cond_resched();
+	}
+}
+EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
+
+/**
+ * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
+ * in the given range of the given address space.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ * @interruptible: if not 0, stop when a fatal signal is received
+ *
+ * Walk the given range of the given address space and call the destroy
+ * secure page UVC on each page. Optionally exit early if a fatal signal is
+ * pending.
+ *
+ * Return: 0 on success, -EINTR if the function stopped before completing
+ */
+int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+			    unsigned long end, bool interruptible)
+{
+	struct reset_walk_state state = { .next = start };
+	int r = 1;
+
+	while (r > 0) {
+		state.count = 0;
+		mmap_read_lock(mm);
+		r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
+		mmap_read_unlock(mm);
+		cond_resched();
+		s390_uv_destroy_pfns(state.count, state.pfns);
+		if (interruptible && fatal_signal_pending(current))
+			return -EINTR;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
 
 /**
  * s390_unlist_old_asce - Remove the topmost level of page tables from the
-- cgit v1.2.3
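Taken together with the first patch of the series, the error path for a
failing Destroy Secure Configuration UVC can now be handled along these
lines (a minimal sketch, assuming a context like kvm_s390_pv_deinit_vm()
above; uv_cmd_nodata() and kvm_s390_pv_get_handle() are the existing
helpers already used in pv.c):

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	if (cc) {
		/*
		 * The topmost page table (and thus the ASCE) may still be
		 * in use by the ultravisor; replace the ASCE with a fresh
		 * copy via s390_replace_asce() and deliberately leak the
		 * old page, as described in the first commit message.
		 */
		s390_replace_asce(kvm->arch.gmap);
	}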