Diffstat (limited to 'arch/s390/kernel/machine_kexec.c')
-rw-r--r--  arch/s390/kernel/machine_kexec.c  192
 1 file changed, 186 insertions(+), 6 deletions(-)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b09b9c62573e..3cd0f25ab015 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,10 +1,11 @@
/*
* arch/s390/kernel/machine_kexec.c
*
- * Copyright IBM Corp. 2005,2006
+ * Copyright IBM Corp. 2005,2011
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/device.h>
@@ -21,12 +22,162 @@
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
+#include <asm/diag.h>
+#include <asm/asm-offsets.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
+#ifdef CONFIG_CRASH_DUMP
+
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
+
+/*
+ * Create ELF notes for one CPU
+ */
+static void add_elf_notes(int cpu)
+{
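+ /* Adding this CPU's prefix to 4608 yields the save area at absolute zero (reverse prefixing) */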
+ struct save_area *sa = (void *) 4608 + store_prefix();
+ void *ptr;
+
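+ /* Copy the stored status from absolute zero into the CPU's own lowcore save area */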
+ memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
+ ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
+ ptr = fill_cpu_elf_notes(ptr, sa);
+ memset(ptr, 0, sizeof(struct elf_note));
+}
+
+/*
+ * Store status of next available physical CPU
+ */
+static int store_status_next(int start_cpu, int this_cpu)
+{
+ struct save_area *sa = (void *) 4608 + store_prefix();
+ int cpu, rc;
+
+ for (cpu = start_cpu; cpu < 65536; cpu++) {
+ if (cpu == this_cpu)
+ continue;
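+ /* Stop the CPU and have it store its status into the save area at absolute zero */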
+ do {
+ rc = raw_sigp(cpu, sigp_stop_and_store_status);
+ } while (rc == sigp_busy);
+ if (rc != sigp_order_code_accepted)
+ continue;
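+ /* Only CPUs that stored a non-zero prefix register left a usable save area behind */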
+ if (sa->pref_reg)
+ return cpu;
+ }
+ return -1;
+}
+
+/*
+ * Initialize CPU ELF notes
+ */
+void setup_regs(void)
+{
+ unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
+ int cpu, this_cpu, phys_cpu = 0, first = 1;
+
+ this_cpu = stap();
+
+ if (!S390_lowcore.prefixreg_save_area)
+ first = 0;
+ for_each_online_cpu(cpu) {
+ if (first) {
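+ /* A store status for our own CPU was already done before we were called */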
+ add_elf_notes(cpu);
+ first = 0;
+ continue;
+ }
+ phys_cpu = store_status_next(phys_cpu, this_cpu);
+ if (phys_cpu == -1)
+ break;
+ add_elf_notes(cpu);
+ phys_cpu++;
+ }
+ /* Copy dump CPU store status info to absolute zero */
+ memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
+}
+
+#endif
+
+/*
+ * Start kdump: We expect here that a store status has been done on our CPU
+ */
+static void __do_machine_kdump(void *image)
+{
+#ifdef CONFIG_CRASH_DUMP
+ int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
+
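+ /* Switch to 64-bit addressing (EA+BA) before jumping into purgatory */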
+ __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
+ setup_regs();
+ start_kdump(1);
+#endif
+}
+
+/*
+ * Check if kdump checksums are valid: We call purgatory with parameter "0"
+ */
+static int kdump_csum_valid(struct kimage *image)
+{
+#ifdef CONFIG_CRASH_DUMP
+ int (*start_kdump)(int) = (void *)image->start;
+ int rc;
+
+ __arch_local_irq_stnsm(0xfb); /* disable DAT */
+ rc = start_kdump(0);
+ __arch_local_irq_stosm(0x04); /* enable DAT */
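+ /* Purgatory returns non-zero if the checksums are valid */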
+ return rc ? 0 : -EINVAL;
+#else
+ return -EINVAL;
+#endif
+}
+
+/*
+ * Map or unmap crashkernel memory
+ */
+static void crash_map_pages(int enable)
+{
+ unsigned long size = resource_size(&crashk_res);
+
+ BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
+ size % KEXEC_CRASH_MEM_ALIGN);
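+ /* The crashkernel region is mapped only while the kdump image is copied into it */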
+ if (enable)
+ vmem_add_mapping(crashk_res.start, size);
+ else
+ vmem_remove_mapping(crashk_res.start, size);
+}
+
+/*
+ * Map crashkernel memory
+ */
+void crash_map_reserved_pages(void)
+{
+ crash_map_pages(1);
+}
+
+/*
+ * Unmap crashkernel memory
+ */
+void crash_unmap_reserved_pages(void)
+{
+ crash_map_pages(0);
+}
+
+/*
+ * Give back memory to hypervisor before new kdump is loaded
+ */
+static int machine_kexec_prepare_kdump(void)
+{
+#ifdef CONFIG_CRASH_DUMP
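+ /* Diagnose 10 tells z/VM that the crashkernel pages are currently unused */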
+ if (MACHINE_IS_VM)
+ diag10_range(PFN_DOWN(crashk_res.start),
+ PFN_DOWN(crashk_res.end - crashk_res.start + 1));
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
int machine_kexec_prepare(struct kimage *image)
{
void *reboot_code_buffer;
@@ -35,6 +186,9 @@ int machine_kexec_prepare(struct kimage *image)
if (ipl_flags & IPL_NSS_VALID)
return -ENOSYS;
+ if (image->type == KEXEC_TYPE_CRASH)
+ return machine_kexec_prepare_kdump();
+
/* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL;
@@ -51,27 +205,53 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
+void arch_crash_save_vmcoreinfo(void)
+{
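+ /* Export lowcore_ptr so dump analysis tools can locate the per-cpu lowcores */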
+ VMCOREINFO_SYMBOL(lowcore_ptr);
+ VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
+}
+
void machine_shutdown(void)
{
}
-static void __machine_kexec(void *data)
+/*
+ * Do normal kexec
+ */
+static void __do_machine_kexec(void *data)
{
relocate_kernel_t data_mover;
struct kimage *image = data;
- pfault_fini();
- s390_reset_system();
-
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
/* Call the moving routine */
(*data_mover)(&image->head, image->start);
- for (;;);
}
+/*
+ * Reset system and call either kdump or normal kexec
+ */
+static void __machine_kexec(void *data)
+{
+ struct kimage *image = data;
+
+ pfault_fini();
+ if (image->type == KEXEC_TYPE_CRASH)
+ s390_reset_system(__do_machine_kdump, data);
+ else
+ s390_reset_system(__do_machine_kexec, data);
+ disabled_wait((unsigned long) __builtin_return_address(0));
+}
+
+/*
+ * Do either kdump or normal kexec. In case of kdump we first ask
+ * purgatory whether the kdump checksums are valid.
+ */
void machine_kexec(struct kimage *image)
{
+ if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
+ return;
tracer_disable();
smp_send_stop();
smp_switch_to_ipl_cpu(__machine_kexec, image);