path: root/arch/riscv/include
author     Palmer Dabbelt <palmer@rivosinc.com>  2022-03-31 02:17:54 +0300
committer  Palmer Dabbelt <palmer@rivosinc.com>  2022-03-31 02:17:54 +0300
commit     bee7fbc38579ba86948689107518c855247d0b49 (patch)
tree       fc46307f316d99295d9d18d1e9ae406230002edf /arch/riscv/include
parent     fdecfea09328b33fd08a4d418237cce9fd176d69 (diff)
parent     c5179ef1ca0c39dab6955be6b0e3c034cc4164c8 (diff)
download   linux-bee7fbc38579ba86948689107518c855247d0b49.tar.xz
RISC-V CPU Idle Support
This series adds RISC-V CPU idle support using the SBI HSM suspend function. The RISC-V SBI CPU idle driver added by this series is heavily inspired by the ARM PSCI CPU idle driver. Special thanks to Sandeep Tripathy for providing early feedback on SBI HSM support across all of the related projects (the RISC-V SBI specification, OpenSBI, and Linux RISC-V).

* palmer/riscv-idle:
  RISC-V: Enable RISC-V SBI CPU Idle driver for QEMU virt machine
  dt-bindings: Add common bindings for ARM and RISC-V idle states
  cpuidle: Add RISC-V SBI CPU idle driver
  cpuidle: Factor-out power domain related code from PSCI domain driver
  RISC-V: Add SBI HSM suspend related defines
  RISC-V: Add arch functions for non-retentive suspend entry/exit
  RISC-V: Rename relocate() and make it global
  RISC-V: Enable CPU_IDLE drivers
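For orientation, here is a minimal C sketch (not part of this diff) of how a driver built on this series can enter an SBI HSM suspend state. The example_* names are hypothetical; SBI_EXT_HSM_HART_SUSPEND and SBI_HSM_SUSP_NON_RET_BIT are assumed to come from the "RISC-V: Add SBI HSM suspend related defines" patch, and cpu_suspend() is declared in the new asm/suspend.h shown below.

/* Illustrative sketch only; not taken verbatim from the series. */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/sbi.h>
#include <asm/suspend.h>

static int example_hsm_suspend_finisher(unsigned long suspend_type,
					unsigned long resume_addr,
					unsigned long opaque)
{
	/* Ask the SBI firmware to suspend the calling hart. */
	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
				      suspend_type, resume_addr, opaque,
				      0, 0, 0);

	/* Only reached if the suspend request itself fails. */
	return ret.error ? -EIO : 0;
}

static int example_enter_hsm_state(u32 suspend_type)
{
	/*
	 * Non-retentive states lose register state, so enter them through
	 * cpu_suspend(), which saves context and resumes via
	 * __cpu_resume_enter(); retentive states can call the SBI directly.
	 */
	if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(suspend_type, example_hsm_suspend_finisher);

	return example_hsm_suspend_finisher(suspend_type, 0, 0);
}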
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/asm.h      26
-rw-r--r--  arch/riscv/include/asm/cpuidle.h  24
-rw-r--r--  arch/riscv/include/asm/suspend.h  36
3 files changed, 86 insertions, 0 deletions
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 618d7c5af1a2..8c2549b16ac0 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,4 +67,30 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif

+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+	la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644
index 000000000000..71fdc607d4bc
--- /dev/null
+++ b/arch/riscv/include/asm/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+ /*
+ * Add mb() here to ensure that all
+ * IO/MEM accesses are completed prior
+ * to entering WFI.
+ */
+ mb();
+ wait_for_interrupt();
+}
+
+#endif
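As a usage note, here is a minimal sketch of how cpu_do_idle() serves as the default idle path. The arch_cpu_idle() change itself lives outside arch/riscv/include (the series is assumed to wire it up in arch/riscv/kernel/process.c), so treat this as illustrative rather than the exact patch content.

/* Illustrative sketch only: default arch idle loop using the new helper. */
#include <linux/irqflags.h>
#include <asm/cpuidle.h>

void arch_cpu_idle(void)
{
	/* Fence outstanding accesses, then wait for an interrupt. */
	cpu_do_idle();
	raw_local_irq_enable();
}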
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644
index 000000000000..8be391c2aecb
--- /dev/null
+++ b/arch/riscv/include/asm/suspend.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+ /* Saved and restored by low-level functions */
+ struct pt_regs regs;
+ /* Saved and restored by high-level functions */
+ unsigned long scratch;
+ unsigned long tvec;
+ unsigned long ie;
+#ifdef CONFIG_MMU
+ unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
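Finally, a hedged sketch of how the three declarations above are expected to fit together. The real cpu_suspend() implementation is added elsewhere in the series (outside this diffstat, in arch/riscv/kernel/suspend.c), so the body below is a simplified assumption and example_cpu_suspend() is a hypothetical name.

/* Illustrative sketch only; simplified relative to the real implementation. */
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/suspend.h>

int example_cpu_suspend(unsigned long arg,
			int (*finish)(unsigned long arg,
				      unsigned long entry,
				      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	if (!finish)
		return -EINVAL;

	/*
	 * __cpu_suspend_enter() saves the CPU registers into context.regs
	 * and returns non-zero on the suspend path.  The finisher then gets
	 * the physical address of the resume entry point plus the saved
	 * context; on a successful non-retentive suspend it never returns,
	 * and the hart comes back through __cpu_resume_enter(), which
	 * restores the context and makes this function return 0.
	 */
	if (__cpu_suspend_enter(&context))
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (unsigned long)&context);

	return rc;
}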