author     Chee Hong Ang <chee.hong.ang@intel.com>   2020-12-24 13:20:58 +0300
committer  Ley Foon Tan <ley.foon.tan@intel.com>     2021-01-15 12:48:35 +0300
commit     ac0c1fda90d4ec460dc32cb8bf8257e0eb3fe7b0 (patch)
tree       fbe7162c77b2b4769ddd25c79c4c7711073a7802 /arch/arm/mach-socfpga
parent     200846f00870132aabf7ee6a2b95b24b6edb9091 (diff)
download   u-boot-ac0c1fda90d4ec460dc32cb8bf8257e0eb3fe7b0.tar.xz
arm: socfpga: soc64: Override 'lowlevel_init' to support ATF
Override 'lowlevel_init' to make sure secondary CPUs are trapped in ATF instead of in SPL. After ATF is initialized, it signals the secondary CPUs to jump from SPL into ATF, where they wait to be 'activated' by the Linux OS via a PSCI call.

Signed-off-by: Chee Hong Ang <chee.hong.ang@intel.com>
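For reference, a minimal C sketch of the spin-table hand-off that the new wait_for_atf loop implements in assembly. The function names and the release_addr parameter are illustrative only; the real mailbox address is CPU_RELEASE_ADDR from the board configuration, and the publishing side is done by ATF/BL31, not by this patch:

#include <stdint.h>

/* Boot-CPU side (conceptually done by ATF/BL31 once it is up): publish the
 * address the secondary CPUs should jump to. A non-zero value releases them. */
static void publish_atf_entry(volatile uint64_t *release_addr, uint64_t atf_entry)
{
	*release_addr = atf_entry;
	__sync_synchronize();	/* sketch only; the assembly relies on dsb/sev semantics */
}

/* Secondary-CPU side, equivalent to the wait_for_atf/slave_wait_atf loop:
 * spin while the mailbox reads zero, then branch to whatever was written.
 * The CPUs end up parked in ATF, waiting for a PSCI CPU_ON from Linux. */
static void wait_for_atf_release(volatile uint64_t *release_addr)
{
	uint64_t entry;

	while ((entry = *release_addr) == 0)
		;	/* keep polling, as 'branch_if_slave x0, wait_for_atf' loops back */

	((void (*)(void))(uintptr_t)entry)();
}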
Diffstat (limited to 'arch/arm/mach-socfpga')
-rw-r--r--  arch/arm/mach-socfpga/Makefile               |  2
-rw-r--r--  arch/arm/mach-socfpga/lowlevel_init_soc64.S  | 76
2 files changed, 78 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile
index 418f543b20..c63162a5c6 100644
--- a/arch/arm/mach-socfpga/Makefile
+++ b/arch/arm/mach-socfpga/Makefile
@@ -29,6 +29,7 @@ endif
ifdef CONFIG_TARGET_SOCFPGA_STRATIX10
obj-y += clock_manager_s10.o
+obj-y += lowlevel_init_soc64.o
obj-y += mailbox_s10.o
obj-y += misc_s10.o
obj-y += mmu-arm64_s10.o
@@ -41,6 +42,7 @@ endif
ifdef CONFIG_TARGET_SOCFPGA_AGILEX
obj-y += clock_manager_agilex.o
+obj-y += lowlevel_init_soc64.o
obj-y += mailbox_s10.o
obj-y += misc_s10.o
obj-y += mmu-arm64_s10.o
diff --git a/arch/arm/mach-socfpga/lowlevel_init_soc64.S b/arch/arm/mach-socfpga/lowlevel_init_soc64.S
new file mode 100644
index 0000000000..612ea8a037
--- /dev/null
+++ b/arch/arm/mach-socfpga/lowlevel_init_soc64.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2020 Intel Corporation. All rights reserved
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/macro.h>
+
+ENTRY(lowlevel_init)
+ mov x29, lr /* Save LR */
+
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_ATF)
+wait_for_atf:
+ ldr x4, =CPU_RELEASE_ADDR
+ ldr x5, [x4]
+ cbz x5, slave_wait_atf
+ br x5
+slave_wait_atf:
+ branch_if_slave x0, wait_for_atf
+#else
+ branch_if_slave x0, 1f
+#endif
+ ldr x0, =GICD_BASE
+ bl gic_init_secure
+1:
+#if defined(CONFIG_GICV3)
+ ldr x0, =GICR_BASE
+ bl gic_init_secure_percpu
+#elif defined(CONFIG_GICV2)
+ ldr x0, =GICD_BASE
+ ldr x1, =GICC_BASE
+ bl gic_init_secure_percpu
+#endif
+#endif
+
+#ifdef CONFIG_ARMV8_MULTIENTRY
+ branch_if_master x0, x1, 2f
+
+ /*
+ * Slaves should wait for the master to clear the spin table.
+ * This sync prevents slaves from observing an incorrect
+ * value in the spin table and jumping to the wrong place.
+ */
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#ifdef CONFIG_GICV2
+ ldr x0, =GICC_BASE
+#endif
+ bl gic_wait_for_interrupt
+#endif
+
+ /*
+ * All slaves will enter EL2 and optionally EL1.
+ */
+ adr x4, lowlevel_in_el2
+ ldr x5, =ES_TO_AARCH64
+ bl armv8_switch_to_el2
+
+lowlevel_in_el2:
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ adr x4, lowlevel_in_el1
+ ldr x5, =ES_TO_AARCH64
+ bl armv8_switch_to_el1
+
+lowlevel_in_el1:
+#endif
+
+#endif /* CONFIG_ARMV8_MULTIENTRY */
+
+2:
+ mov lr, x29 /* Restore LR */
+ ret
+ENDPROC(lowlevel_init)