path: root/arch/s390/mm
author	Alexander Gordeev <agordeev@linux.ibm.com>	2022-07-24 16:02:16 +0300
committer	Vasily Gorbik <gor@linux.ibm.com>	2022-09-14 17:46:01 +0300
commit	2f0e8aae26a27fe73d033788f8e92188e7584f41 (patch)
tree	07c8abbd98e7b51142126c6c5880eaeea65ccaf9 /arch/s390/mm
parent	14a3a2624285d36624966935ec12f228d876c028 (diff)
download	linux-2f0e8aae26a27fe73d033788f8e92188e7584f41.tar.xz
s390/mm: rework memcpy_real() to avoid DAT-off mode
Function memcpy_real() is a universal data mover that does not require DAT mode to be able to read from a physical address. Its advantage is the ability to read from any address, even those for which no kernel virtual mapping exists. Although memcpy_real() is interrupt-safe, there are no handlers that make use of this function. Compiler instrumentation has to be disabled and a separate no-DAT stack has to be used to allow execution of the function once DAT mode is disabled.

Rework memcpy_real() to overcome these shortcomings. As a result, data copying (which is primarily reading out the crashed system's memory by a user process) is executed on a regular stack with interrupts enabled. Also, use of the memcpy_real_buf swap buffer becomes unnecessary and the swapping is eliminated.

The above is achieved by using a fixed virtual address range that spans a single page and remapping that page repeatedly when memcpy_real() is called for a particular physical address.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
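For illustration, a hedged sketch of how a caller might drive the reworked interface follows, either through the legacy-style memcpy_real() wrapper or directly with an iov_iter. The function name read_crash_page() and its error handling are illustrative assumptions; only memcpy_real(), memcpy_real_iter() and iov_iter_kvec() come from the diff below, and their prototypes are assumed to be visible to the caller.

#include <linux/uio.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: read one page of the crashed system's memory at
 * physical address "paddr" into a kernel buffer, then stream the same
 * page into a caller-provided iov_iter without any bounce buffer.
 */
static int read_crash_page(void *buf, unsigned long paddr, struct iov_iter *to)
{
	/* Legacy-style call: kernel destination buffer, -EFAULT on failure */
	if (memcpy_real(buf, paddr, PAGE_SIZE))
		return -EFAULT;

	/* iov_iter based call: returns the number of bytes actually copied */
	if (memcpy_real_iter(to, paddr, PAGE_SIZE) < PAGE_SIZE)
		return -EFAULT;
	return 0;
}

Because the copy now runs with DAT enabled and interrupts on, memcpy_real_iter() can write straight into the destination iterator, so no memcpy_real_buf style bounce buffer is involved.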
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/maccess.c	107
-rw-r--r--	arch/s390/mm/vmem.c	2
2 files changed, 47 insertions, 62 deletions
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index b8451ddbb3d6..bd1bcbb02938 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -12,12 +12,17 @@
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
+#include <linux/uio.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
+unsigned long __bootdata_preserved(__memcpy_real_area);
+static __ro_after_init pte_t *memcpy_real_ptep;
+static DEFINE_MUTEX(memcpy_real_mutex);
+
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long aligned, offset, count;
@@ -77,75 +82,55 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
return dst;
}
-static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
+void __init memcpy_real_init(void)
{
- union register_pair _dst, _src;
- int rc = -EFAULT;
-
- _dst.even = (unsigned long) dest;
- _dst.odd = (unsigned long) count;
- _src.even = (unsigned long) src;
- _src.odd = (unsigned long) count;
- asm volatile (
- "0: mvcle %[dst],%[src],0\n"
- "1: jo 0b\n"
- " lhi %[rc],0\n"
- "2:\n"
- EX_TABLE(1b,2b)
- : [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
- : : "cc", "memory");
- return rc;
+ memcpy_real_ptep = vmem_get_alloc_pte(__memcpy_real_area, true);
+ if (!memcpy_real_ptep)
+ panic("Couldn't setup memcpy real area");
}
-static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
- unsigned long src,
- unsigned long count)
+size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- int irqs_disabled, rc;
- unsigned long flags;
-
- if (!count)
- return 0;
- flags = arch_local_irq_save();
- irqs_disabled = arch_irqs_disabled_flags(flags);
- if (!irqs_disabled)
- trace_hardirqs_off();
- __arch_local_irq_stnsm(0xf8); // disable DAT
- rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
- if (flags & PSW_MASK_DAT)
- __arch_local_irq_stosm(0x04); // enable DAT
- if (!irqs_disabled)
- trace_hardirqs_on();
- __arch_local_irq_ssm(flags);
- return rc;
+ size_t len, copied, res = 0;
+ unsigned long phys, offset;
+ void *chunk;
+ pte_t pte;
+
+ while (count) {
+ phys = src & PAGE_MASK;
+ offset = src & ~PAGE_MASK;
+ chunk = (void *)(__memcpy_real_area + offset);
+ len = min(count, PAGE_SIZE - offset);
+ pte = mk_pte_phys(phys, PAGE_KERNEL_RO);
+
+ mutex_lock(&memcpy_real_mutex);
+ if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
+ __ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
+ set_pte(memcpy_real_ptep, pte);
+ }
+ copied = copy_to_iter(chunk, len, iter);
+ mutex_unlock(&memcpy_real_mutex);
+
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < len)
+ break;
+ }
+ return res;
}
-/*
- * Copy memory in real mode (kernel to kernel)
- */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
- unsigned long _dest = (unsigned long)dest;
- unsigned long _src = (unsigned long)src;
- unsigned long _count = (unsigned long)count;
- int rc;
-
- if (S390_lowcore.nodat_stack != 0) {
- preempt_disable();
- rc = call_on_stack(3, S390_lowcore.nodat_stack,
- unsigned long, _memcpy_real,
- unsigned long, _dest,
- unsigned long, _src,
- unsigned long, _count);
- preempt_enable();
- return rc;
- }
- /*
- * This is a really early memcpy_real call, the stacks are
- * not set up yet. Just call _memcpy_real on the early boot
- * stack
- */
- return _memcpy_real(_dest, _src, _count);
+ struct iov_iter iter;
+ struct kvec kvec;
+
+ kvec.iov_base = dest;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (memcpy_real_iter(&iter, src, count) < count)
+ return -EFAULT;
+ return 0;
}
/*
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index a50a809e024f..ee1a97078527 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -567,7 +567,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
* while traversing is an error, since the function is expected to be
* called against virtual regions reserverd for 4KB mappings only.
*/
-static pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
+pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
pte_t *ptep = NULL;
pgd_t *pgd;
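With vmem_get_alloc_pte() exported, memcpy_real_init() resolves the page-table entry backing __memcpy_real_area once at boot and caches it in memcpy_real_ptep, so the copy path never walks the page tables again; it only invalidates and rewrites that single cached pte. A minimal sketch of this remap step, isolated from memcpy_real_iter() above, is shown here; the helper name memcpy_real_remap() is illustrative, the identifiers inside it are those from the diff, and the locking done via memcpy_real_mutex is omitted.

/*
 * Point the fixed one-page window at __memcpy_real_area to the physical
 * page containing "phys".  Callers are assumed to hold memcpy_real_mutex.
 */
static void memcpy_real_remap(unsigned long phys)
{
	pte_t pte = mk_pte_phys(phys & PAGE_MASK, PAGE_KERNEL_RO);

	/* The window already maps this page - skip the TLB invalidation */
	if (pte_val(pte) == pte_val(*memcpy_real_ptep))
		return;
	/* Invalidate the stale mapping on all CPUs, then install the new one */
	__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
	set_pte(memcpy_real_ptep, pte);
}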