author     Dave Martin <dave.martin@linaro.org>  2012-07-17 17:25:42 +0400
committer  Nicolas Pitre <nicolas.pitre@linaro.org>  2013-04-24 18:37:00 +0400
commit     7fe31d28e839f9565c8176ec584676a045970802 (patch)
tree       c3fcc8b99461e1fbff13eb08f8dbec891dc1baa4 /arch/arm/include/asm/mcpm.h
parent     7c2b860534d02d11923dd0504b961f21508173f1 (diff)
download   linux-7fe31d28e839f9565c8176ec584676a045970802.tar.xz
ARM: mcpm: introduce helpers for platform coherency exit/setup
This provides helper methods to coordinate between CPUs coming down and CPUs going up, as well as documentation of the algorithms used, so that cluster teardown and setup operations are not done for a cluster simultaneously.

For use in the power_down() implementation:

  * __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
  * __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster)
  * __mcpm_outbound_leave_critical(unsigned int cluster, int state)
  * __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)

The power_up_setup() helper should do platform-specific setup in preparation for turning the CPU on, such as invalidating local caches or entering coherency. It must be written in assembler for now, since it must run before the MMU can be switched on. It is passed the affinity level for which initialization should be performed.

Because the mcpm_sync_struct content is looked up and modified with the cache enabled or disabled depending on the code path, it is crucial to always ensure proper cache maintenance so that main memory is updated right away. The sync_cache_*() helpers are used to that end.

Also, in order to prevent a cached writer from interfering with an adjacent non-cached writer, we ensure each state variable is located in a separate cache line.

Thanks to Nicolas Pitre and Achin Gupta for their help with this patch.

Signed-off-by: Dave Martin <dave.martin@linaro.org>
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Will Deacon <will.deacon@arm.com>
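
To make the intended down-path sequencing concrete, here is a minimal sketch of how a platform backend's power_down() method might use these helpers. It is not part of this patch: my_pm_power_down(), my_pm_lock and my_pm_use_count[] are hypothetical placeholders for platform-specific code, and the cache flush and coherency exit steps are only indicated as comments since they are entirely platform specific.

#include <linux/spinlock.h>
#include <asm/cputype.h>
#include <asm/mcpm.h>

/* Hypothetical per-platform state; a real backend keeps its own use counts. */
static arch_spinlock_t my_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int my_pm_use_count[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];

/* Hypothetical power_down() backend, as registered via mcpm_platform_register(). */
static void my_pm_power_down(void)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        bool last_man = true;
        unsigned int i;

        /* Advertise that this CPU is on its way down. */
        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&my_pm_lock);
        my_pm_use_count[cpu][cluster]--;
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
                if (my_pm_use_count[i][cluster])
                        last_man = false;

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                /* This CPU won the right to tear the whole cluster down. */
                arch_spin_unlock(&my_pm_lock);
                /*
                 * Platform specific: flush and disable the whole cache
                 * hierarchy, exit cluster coherency, shut down cluster
                 * resources.
                 */
                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                arch_spin_unlock(&my_pm_lock);
                /*
                 * Platform specific: flush and disable the local cache
                 * only and prepare just this CPU for power-down.
                 */
        }

        /* Mark this CPU as down; the inbound side may now proceed. */
        __mcpm_cpu_down(cpu, cluster);

        /* Finally wait for the power controller, typically in WFI. */
}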
Diffstat (limited to 'arch/arm/include/asm/mcpm.h')
-rw-r--r--  arch/arm/include/asm/mcpm.h  73
1 file changed, 73 insertions, 0 deletions
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 627761fce780..3046e90210cb 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -24,6 +24,9 @@
#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
/*
* Platform specific code should use this symbol to set up secondary
* entry location for processors to use when released from reset.
@@ -130,5 +133,75 @@ struct mcpm_platform_ops {
*/
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys; /* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN 0x11
+#define CPU_COMING_UP 0x12
+#define CPU_UP 0x13
+#define CPU_GOING_DOWN 0x14
+
+#define CLUSTER_DOWN 0x21
+#define CLUSTER_UP 0x22
+#define CLUSTER_GOING_DOWN 0x23
+
+#define INBOUND_NOT_COMING_UP 0x31
+#define INBOUND_COMING_UP 0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
#endif
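
The comment above struct mcpm_sync_struct warns that the MCPM_SYNC_* defines must be kept in step with the C layout. One way this could be enforced at build time is sketched below; the function mcpm_sync_offset_check() is a hypothetical illustration and not part of this patch, but the offsets and struct fields it checks are the ones introduced here.

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <asm/mcpm.h>

/*
 * Hypothetical build-time layout check; call it from an __init path so the
 * compiler evaluates the BUILD_BUG_ON()s against the real struct layout.
 */
static void __init mcpm_sync_offset_check(void)
{
        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_CPUS !=
                     offsetof(struct mcpm_sync_struct, cpus));
        BUILD_BUG_ON(MCPM_SYNC_CPU_SIZE !=
                     sizeof(((struct mcpm_sync_struct *)0)->cpus[0]));
        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_CLUSTER !=
                     offsetof(struct mcpm_sync_struct, cluster));
        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_INBOUND !=
                     offsetof(struct mcpm_sync_struct, inbound));
        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE !=
                     sizeof(struct mcpm_sync_struct));
}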