If someone happens to write the following code:

        b       1f
        init_el2_state  vhe
1:
        [...]

they will be in for a long debugging session, as the label "1f"
will be resolved *inside* the init_el2_state macro instead of
after it. Not really what one expects.

Instead, rewrite the EL2 setup macros to use unambiguous labels,
thanks to the usual macro counter trick.

Acked-by: Catalin Marinas <catalin.mari...@arm.com>
Signed-off-by: Marc Zyngier <m...@kernel.org>
Acked-by: David Brazdil <dbraz...@google.com>
---
 arch/arm64/include/asm/el2_setup.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index a7f5a1bbc8ac..540116de80bf 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -45,24 +45,24 @@
        mrs     x1, id_aa64dfr0_el1
        sbfx    x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
        cmp     x0, #1
-       b.lt    1f                              // Skip if no PMU present
+       b.lt    .Lskip_pmu_\@                   // Skip if no PMU present
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
-1:
+.Lskip_pmu_\@:
        csel    x2, xzr, x0, lt                 // all PMU counters from EL1
 
        /* Statistical profiling */
        ubfx    x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
-       cbz     x0, 3f                          // Skip if SPE not present
+       cbz     x0, .Lskip_spe_\@               // Skip if SPE not present
 
 .ifeqs "\mode", "nvhe"
        mrs_s   x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
        and     x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
-       cbnz    x0, 2f                          // then permit sampling of physical
+       cbnz    x0, .Lskip_spe_el2_\@           // then permit sampling of physical
        mov     x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
                      1 << SYS_PMSCR_EL2_PA_SHIFT)
        msr_s   SYS_PMSCR_EL2, x0               // addresses and physical counter
-2:
+.Lskip_spe_el2_\@:
        mov     x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
        orr     x2, x2, x0                      // If we don't have VHE, then
                                                // use EL1&0 translation.
@@ -71,7 +71,7 @@
                                                // and disable access from EL1
 .endif
 
-3:
+.Lskip_spe_\@:
        msr     mdcr_el2, x2                    // Configure debug traps
 .endm
 
@@ -79,9 +79,9 @@
 .macro __init_el2_lor
        mrs     x1, id_aa64mmfr1_el1
        ubfx    x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
-       cbz     x0, 1f
+       cbz     x0, .Lskip_lor_\@
        msr_s   SYS_LORC_EL1, xzr
-1:
+.Lskip_lor_\@:
 .endm
 
 /* Stage-2 translation */
@@ -93,7 +93,7 @@
 .macro __init_el2_gicv3
        mrs     x0, id_aa64pfr0_el1
        ubfx    x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
-       cbz     x0, 1f
+       cbz     x0, .Lskip_gicv3_\@
 
        mrs_s   x0, SYS_ICC_SRE_EL2
        orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
@@ -103,7 +103,7 @@
        mrs_s   x0, SYS_ICC_SRE_EL2             // Read SRE back,
-       tbz     x0, #0, 1f                      // and check that it sticks
+       tbz     x0, #0, .Lskip_gicv3_\@         // and check that it sticks
        msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICC_HCR_EL2 to defaults
-1:
+.Lskip_gicv3_\@:
 .endm
 
 .macro __init_el2_hstr
@@ -128,14 +128,14 @@
 .macro __init_el2_nvhe_sve
        mrs     x1, id_aa64pfr0_el1
        ubfx    x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
-       cbz     x1, 1f
+       cbz     x1, .Lskip_sve_\@
 
        bic     x0, x0, #CPTR_EL2_TZ            // Also disable SVE traps
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
        isb
        mov     x1, #ZCR_ELx_LEN_MASK           // SVE: Enable full vector
        msr_s   SYS_ZCR_EL2, x1                 // length for EL1.
-1:
+.Lskip_sve_\@:
 .endm
 
 .macro __init_el2_nvhe_prepare_eret
-- 
2.29.2

Reply via email to