From 6c7691edd55bc13c221d7283c238d305f6923236 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Fri, 11 Feb 2022 11:29:35 +0000
Subject: [PATCH] armv8: Always unmask SErrors

The ARMv8 architecture describes the "SError interrupt" as the fourth
kind of exception, next to synchronous exceptions, IRQs, and FIQs.
Those SErrors signal exceptional conditions from which the system might
not easily recover, and are normally generated by the interconnect as a
response to some bus error. A typical situation is an access to a
non-existing memory address or device, but an SError might be
deliberately triggered by a device as well. The SError interrupt
replaces the ARMv7 asynchronous abort.

Trusted Firmware typically enters U-Boot (BL33) with SErrors masked,
and we never enable them. However, any SError condition still triggers
the SError interrupt, and this condition stays pending; it just won't
be handled. If the Linux kernel later unmasks the "A" bit in PState, it
will immediately take the exception, leading to a kernel crash. This
leaves many people scratching their heads about the reason, and leads
to long debug sessions, possibly spent looking at the wrong place (the
kernel rather than U-Boot).

To avoid this situation, just unmask SErrors early in the ARMv8 boot
process, so that the U-Boot exception handler reports them in a timely
manner. As SErrors are typically asynchronous, the register dump might
not point at the actual culprit, but the exception should be taken very
shortly after the triggering condition.

For those exceptions to be taken, we also need to route them to EL2,
if U-Boot is running in this exception level.

This removes the respective code snippet from the Freescale lowlevel
routine, as this is now handled in generic ARMv8 code.

Reported-by: Nishanth Menon
Signed-off-by: Andre Przywara
---
 arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 9 ---------
 arch/arm/cpu/armv8/start.S                   | 3 +++
 arch/arm/include/asm/system.h                | 1 +
 3 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
index 3aa1a9c3e5..0929c58d43 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
@@ -74,15 +74,6 @@ ENDPROC(smp_kick_all_cpus)
 ENTRY(lowlevel_init)
 	mov	x29, lr			/* Save LR */
 
-	/* unmask SError and abort */
-	msr	daifclr, #4
-
-	/* Set HCR_EL2[AMO] so SError @EL2 is taken */
-	mrs	x0, hcr_el2
-	orr	x0, x0, #0x20		/* AMO */
-	msr	hcr_el2, x0
-	isb
-
 	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
 1:
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index 91b00a46cc..d9610a5ea0 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -126,6 +126,8 @@ pie_fixup_done:
 	b	0f
 2:	mrs	x1, hcr_el2
 	tbnz	x1, #34, 1f		/* HCR_EL2.E2H */
+	orr	x1, x1, #HCR_EL2_AMO_EL2 /* Route SErrors to EL2 */
+	msr	hcr_el2, x1
 	set_vbar vbar_el2, x0
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0		/* Enable FP/SIMD */
@@ -134,6 +136,7 @@ pie_fixup_done:
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0		/* Enable FP/SIMD */
 0:
+	msr	daifclr, #0x4		/* Unmask SError interrupts */
 
 #ifdef COUNTER_FREQUENCY
 	branch_if_not_highest_el x0, 4f
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index f75eea16b3..87d1c77e8b 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -82,6 +82,7 @@
 #define HCR_EL2_RW_AARCH64	(1 << 31) /* EL1 is AArch64 */
 #define HCR_EL2_RW_AARCH32	(0 << 31) /* Lower levels are AArch32 */
 #define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled */
+#define HCR_EL2_AMO_EL2		(1 << 5)  /* Route SErrors to EL2 */
 
 /*
  * ID_AA64ISAR1_EL1 bits definitions
-- 
2.39.5
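
For readers who want to try the same two operations in their own EL2
code, here is a minimal sketch written as C with GCC-style inline
assembly. It is illustrative only and not part of the patch: the
function name is made up, and it assumes a bare-metal context running
at EL2 (hcr_el2 is not accessible from EL1).

/*
 * Sketch only: mirrors what the patch does in start.S.
 * Assumes execution at EL2; the function name is hypothetical.
 */
static inline void unmask_serrors_at_el2(void)
{
	unsigned long hcr;

	/* Route SErrors (asynchronous aborts) to EL2: set HCR_EL2.AMO (bit 5) */
	asm volatile("mrs %0, hcr_el2" : "=r" (hcr));
	hcr |= 1UL << 5;
	asm volatile("msr hcr_el2, %0" : : "r" (hcr));
	asm volatile("isb");

	/* Unmask SErrors: clear PSTATE.A (the "A" bit in DAIF) */
	asm volatile("msr daifclr, #4" ::: "memory");
}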