From 1e24b45a8ff34af45dda45c57f8403452d384f99 Mon Sep 17 00:00:00 2001
From: Olivier Deprez <olivier.deprez@arm.com>
Date: Mon, 8 Aug 2022 19:14:23 +0200
Subject: [PATCH] feat: disable alignment check for EL0 partitions

Relax the hardware alignment check specifically for (S-)EL0 partitions
when Hafnium runs with VHE enabled. EL1 partitions have their own
control over the alignment check for both EL1 and EL0.
Create a hyp_state structure (gathering registers already defined
individually in the vCPU context) to hold the static Hypervisor EL2
configuration applied while a vCPU runs. This state is switched back
and forth when entering the Hypervisor or resuming the VM.
Add SCTLR_EL2 to this context. An EL0 partition context is initialized
with SCTLR_EL2.A=0 such that the alignment check is disabled while EL0
runs in the EL2&0 translation regime. SCTLR_EL2.A is set again when
returning to the Hypervisor, such that Hypervisor execution at EL2 runs
with the alignment check enabled.
Remove the HCR_EL2 save from the vCPU exit path, given that this
register state is static and does not change while a vCPU runs.
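For illustration, a minimal C sketch of the flow the exceptions.S
assembly below implements (conceptual only; write_msr()/isb() stand in
for the usual system register accessors):

    static void vcpu_restore_hyp_state(struct arch_regs *r)
    {
        /* HCR_EL2 is static per vCPU, hence no save on exit. */
        write_msr(hcr_el2, r->hyp_state.hcr_el2);
        write_msr(ttbr0_el2, r->hyp_state.ttbr0_el2);
        isb();
        /* A=0 here for a VHE EL0 partition: no alignment check. */
        write_msr(sctlr_el2, r->hyp_state.sctlr_el2);
        isb();
    }

On the way back, enable_vhe_tge sets HCR_EL2.TGE and SCTLR_EL2.A again
so that the Hypervisor itself always runs with the check enabled.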
The rationale for this change is to permit running upstream SW stacks
such as EDKII/StandaloneMm [1], whose default build assumes unaligned
accesses are permitted. A similar request exists for running Trusted
Services on top of Hafnium [2].

[1] https://github.com/tianocore/edk2/tree/master/StandaloneMmPkg
[2] https://trusted-services.readthedocs.io/en/integration/
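
As an illustration (not part of this change), the access pattern at
stake boils down to a plain unaligned load, e.g.:

    #include <stdint.h>

    /* Hypothetical snippet: with SCTLR_EL2.A=1 in the EL2&0 regime,
     * the load below takes an alignment fault in an (S-)EL0 partition
     * whenever p is not 4-byte aligned; with A=0 the hardware handles
     * it. */
    uint32_t read_u32(const uint8_t *p)
    {
        return *(const uint32_t *)p; /* may compile to an unaligned LDR */
    }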

Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: I2906f4c712425fcfb31adbf89e2e3b9ca293f181
Upstream-Status: Submitted [https://review.trustedfirmware.org/c/hafnium/hafnium/+/16195]
---
 src/arch/aarch64/hypervisor/cpu.c        |  9 ++++---
 src/arch/aarch64/hypervisor/exceptions.S | 32 ++++++++++++++++--------
 src/arch/aarch64/hypervisor/feature_id.c |  6 ++---
 src/arch/aarch64/hypervisor/handler.c    | 18 +++++++------
 src/arch/aarch64/inc/hf/arch/types.h     |  9 +++++--
 src/arch/aarch64/mm.c                    |  2 +-
 src/arch/aarch64/sysregs.c               | 11 ++++++--
 src/arch/aarch64/sysregs.h               |  2 +-
 8 files changed, 59 insertions(+), 30 deletions(-)

diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index d2df77d..a000159 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -115,7 +115,9 @@ void arch_regs_reset(struct vcpu *vcpu)
 		}
 	}
 
-	r->hcr_el2 = get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
+	r->hyp_state.hcr_el2 =
+		get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
+	r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);
 	r->lazy.cnthctl_el2 = cnthctl;
 	if (vcpu->vm->el0_partition) {
 		CHECK(has_vhe_support());
@@ -125,10 +127,11 @@ void arch_regs_reset(struct vcpu *vcpu)
 		 * are ignored and treated as 0. There is no need to mask the
 		 * VMID (used as asid) to only 8 bits.
 		 */
-		r->ttbr0_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
+		r->hyp_state.ttbr0_el2 =
+			pa_addr(table) | ((uint64_t)vm_id << 48);
 		r->spsr = PSR_PE_MODE_EL0T;
 	} else {
-		r->ttbr0_el2 = read_msr(ttbr0_el2);
+		r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
 		r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
 		r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
 #if SECURE_WORLD == 1
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 539e196..d3732f8 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -20,6 +20,9 @@
 #define ID_AA64PFR0_SVE_SHIFT (32)
 #define ID_AA64PFR0_SVE_LENGTH (4)
 
+#define SCTLR_EL2_A_SHIFT	(1)
+#define HCR_EL2_TGE_SHIFT	(27)
+
 /**
  * Saves the volatile registers into the register buffer of the current vCPU.
  */
@@ -51,8 +54,6 @@
 	mrs x1, elr_el2
 	mrs x2, spsr_el2
 	stp x1, x2, [x18, #VCPU_REGS + 8 * 31]
-	mrs x1, hcr_el2
-	str x1, [x18, #VCPU_REGS + 8 * 33]
 .endm
 
 /**
@@ -871,12 +872,13 @@ vcpu_restore_volatile_and_run:
 	msr elr_el2, x1
 	msr spsr_el2, x2
 
-	ldr x1, [x0, #VCPU_REGS + 8 * 33]
+	ldp x1, x2, [x0, #VCPU_REGS + 8 * 33]
 	msr hcr_el2, x1
+	msr ttbr0_el2, x2
 	isb
 
-	ldr x1, [x0, #VCPU_REGS + 8 * 34]
-	msr ttbr0_el2, x1
+	ldr x1, [x0, #VCPU_REGS + 8 * 35]
+	msr sctlr_el2, x1
 	isb
 
 	/* Restore x0..x3, which we have used as scratch before. */
@@ -886,15 +888,17 @@ vcpu_restore_volatile_and_run:
 
 #if ENABLE_VHE
 enable_vhe_tge:
+	mrs x0, id_aa64mmfr1_el1
+	tst x0, #0xf00
+	b.eq 1f
+
 	/**
 	 * Switch to host mode ({E2H, TGE} = {1,1}) when VHE is enabled.
 	 * Note that E2H is always set when VHE is enabled.
 	 */
-	mrs x0, id_aa64mmfr1_el1
-	tst x0, #0xf00
-	b.eq 1f
-	orr x1, x1, #(1 << 27)
-	msr hcr_el2, x1
+	mrs x0, hcr_el2
+	orr x0, x0, #(1 << HCR_EL2_TGE_SHIFT)
+	msr hcr_el2, x0
 	isb
 
 	/**
@@ -905,6 +909,14 @@ enable_vhe_tge:
 	ldr x0, [x0]
 	msr ttbr0_el2, x0
 	isb
+
+	/**
+	 * Enable the alignment check while the Hypervisor runs.
+	 */
+	mrs x0, sctlr_el2
+	orr x0, x0, #(1 << SCTLR_EL2_A_SHIFT)
+	msr sctlr_el2, x0
+	isb
 1:
 	ret
 #endif
diff --git a/src/arch/aarch64/hypervisor/feature_id.c b/src/arch/aarch64/hypervisor/feature_id.c
index ed3bf8f..57f3262 100644
--- a/src/arch/aarch64/hypervisor/feature_id.c
+++ b/src/arch/aarch64/hypervisor/feature_id.c
@@ -175,7 +175,7 @@ void feature_set_traps(struct vm *vm, struct arch_regs *regs)
 		~(ID_AA64MMFR1_EL1_VH_MASK << ID_AA64MMFR1_EL1_VH_SHIFT);
 
 	if (features & HF_FEATURE_RAS) {
-		regs->hcr_el2 |= HCR_EL2_TERR;
+		regs->hyp_state.hcr_el2 |= HCR_EL2_TERR;
 		vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
 			~ID_AA64MMFR1_EL1_SPEC_SEI;
 		vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
@@ -221,14 +221,14 @@ void feature_set_traps(struct vm *vm, struct arch_regs *regs)
 	}
 
 	if (features & HF_FEATURE_LOR) {
-		regs->hcr_el2 |= HCR_EL2_TLOR;
+		regs->hyp_state.hcr_el2 |= HCR_EL2_TLOR;
 
 		vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
 	}
 
 	if (features & HF_FEATURE_PAUTH) {
 		/* APK and API bits *enable* trapping when cleared. */
-		regs->hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
+		regs->hyp_state.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
 
 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index cd5146b..8a3d628 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -272,9 +272,9 @@ noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
 static void set_virtual_irq(struct arch_regs *r, bool enable)
 {
 	if (enable) {
-		r->hcr_el2 |= HCR_EL2_VI;
+		r->hyp_state.hcr_el2 |= HCR_EL2_VI;
 	} else {
-		r->hcr_el2 &= ~HCR_EL2_VI;
+		r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
 	}
 }
 
@@ -283,14 +283,15 @@ static void set_virtual_irq(struct arch_regs *r, bool enable)
  */
 static void set_virtual_irq_current(bool enable)
 {
-	uintreg_t hcr_el2 = current()->regs.hcr_el2;
+	struct vcpu *vcpu = current();
+	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;
 
 	if (enable) {
 		hcr_el2 |= HCR_EL2_VI;
 	} else {
 		hcr_el2 &= ~HCR_EL2_VI;
 	}
-	current()->regs.hcr_el2 = hcr_el2;
+	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
 }
 
 /**
@@ -300,9 +301,9 @@ static void set_virtual_irq_current(bool enable)
 static void set_virtual_fiq(struct arch_regs *r, bool enable)
 {
 	if (enable) {
-		r->hcr_el2 |= HCR_EL2_VF;
+		r->hyp_state.hcr_el2 |= HCR_EL2_VF;
 	} else {
-		r->hcr_el2 &= ~HCR_EL2_VF;
+		r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
 	}
 }
 
@@ -311,14 +312,15 @@ static void set_virtual_fiq(struct arch_regs *r, bool enable)
  */
 static void set_virtual_fiq_current(bool enable)
 {
-	uintreg_t hcr_el2 = current()->regs.hcr_el2;
+	struct vcpu *vcpu = current();
+	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;
 
 	if (enable) {
 		hcr_el2 |= HCR_EL2_VF;
 	} else {
 		hcr_el2 &= ~HCR_EL2_VF;
 	}
-	current()->regs.hcr_el2 = hcr_el2;
+	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
 }
 
 #if SECURE_WORLD == 1
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
index 6379d73..6b8b24f 100644
--- a/src/arch/aarch64/inc/hf/arch/types.h
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -79,8 +79,13 @@ struct arch_regs {
 	uintreg_t r[NUM_GP_REGS];
 	uintreg_t pc;
 	uintreg_t spsr;
-	uintreg_t hcr_el2;
-	uintreg_t ttbr0_el2;
+
+	/* Hypervisor configuration while a vCPU runs. */
+	struct {
+		uintreg_t hcr_el2;
+		uintreg_t ttbr0_el2;
+		uintreg_t sctlr_el2;
+	} hyp_state;
 
 	/*
 	 * System registers.
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 8ee65ca..487ae35 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -886,7 +886,7 @@ bool arch_mm_init(paddr_t table)
 #endif
 				    (0xff << (8 * STAGE1_NORMALINDX)),
 
-		.sctlr_el2 = get_sctlr_el2_value(),
+		.sctlr_el2 = get_sctlr_el2_value(false),
 		.vstcr_el2 = (1U << 31) |	    /* RES1. */
 			     (0 << 30) |	    /* SA. */
 			     (0 << 29) |	    /* SW. */
diff --git a/src/arch/aarch64/sysregs.c b/src/arch/aarch64/sysregs.c
index e8c154b..087ba4e 100644
--- a/src/arch/aarch64/sysregs.c
+++ b/src/arch/aarch64/sysregs.c
@@ -159,7 +159,7 @@ uintreg_t get_cptr_el2_value(void)
 /**
  * Returns the value for SCTLR_EL2 for the CPU.
  */
-uintreg_t get_sctlr_el2_value(void)
+uintreg_t get_sctlr_el2_value(bool is_el0_partition)
 {
 	uintreg_t sctlr_el2_value = 0;
 
@@ -173,7 +173,14 @@ uintreg_t get_sctlr_el2_value(void)
 
 	/* MMU-related bits. */
 	sctlr_el2_value |= SCTLR_EL2_M;
-	sctlr_el2_value |= SCTLR_EL2_A;
+
+	/*
+	 * Alignment check enabled, except in the case of an EL0 partition
+	 * with VHE enabled.
+	 */
+	if (!(has_vhe_support() && is_el0_partition)) {
+		sctlr_el2_value |= SCTLR_EL2_A;
+	}
 	sctlr_el2_value |= SCTLR_EL2_C;
 	sctlr_el2_value |= SCTLR_EL2_SA;
 	sctlr_el2_value |= SCTLR_EL2_I;
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index babd237..6fdab58 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -668,7 +668,7 @@ uintreg_t get_mdcr_el2_value(void);
 
 uintreg_t get_cptr_el2_value(void);
 
-uintreg_t get_sctlr_el2_value(void);
+uintreg_t get_sctlr_el2_value(bool is_el0_partition);
 
 /**
  * Branch Target Identification mechanism support in AArch64 state.
-- 
2.34.1