This forced inlining can result in missing symbols, which makes a debugging build harder to follow.
Reported-by: Peter Maydell <peter.mayd...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/cputlb.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index abae79650c..909f01ebcc 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1269,6 +1269,18 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }
 
+/*
+ * In order for the expected constant folding to happen,
+ * we require that some functions be inlined.
+ * However, this inlining can make debugging harder for a
+ * non-optimizing build.
+ */
+#ifdef __OPTIMIZE__
+#define ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define ALWAYS_INLINE
+#endif
+
 /*
  * Load Helpers
  *
@@ -1281,7 +1293,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
 
-static inline uint64_t __attribute__((always_inline))
+static inline uint64_t ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
@@ -1530,7 +1542,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  * Store Helpers
  */
 
-static inline void __attribute__((always_inline))
+static inline void ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
--
2.17.1
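
For readers unfamiliar with the __OPTIMIZE__ trick, here is a minimal standalone sketch of the same pattern outside the patch context. The ALWAYS_INLINE macro mirrors the one added above; demo_helper() and the surrounding scaffolding are hypothetical stand-ins for load_helper()/store_helper(), not QEMU code. __OPTIMIZE__ is predefined by GCC and Clang whenever optimization is enabled, so the attribute only takes effect in optimized builds:

#include <stdio.h>

#ifdef __OPTIMIZE__
#define ALWAYS_INLINE __attribute__((always_inline))
#else
#define ALWAYS_INLINE
#endif

/* Hypothetical stand-in for load_helper()/store_helper(): when inlined
 * into a caller that passes a compile-time-constant 'op', the switch
 * below constant-folds down to a single arm. */
static inline int ALWAYS_INLINE
demo_helper(int op, int val)
{
    switch (op) {
    case 0:
        return val + 1;
    case 1:
        return val * 2;
    default:
        return val;
    }
}

int main(void)
{
    /* With -O1 or higher, demo_helper() is forced inline and the call
     * folds to a constant add; with -O0 the macro expands to nothing,
     * so it stays an ordinary out-of-line function whose symbol is
     * visible to a debugger. */
    printf("%d\n", demo_helper(0, 41));
    return 0;
}

Compiling once with -O0 and once with -O2 and inspecting the symbols (e.g. with nm) should show demo_helper surviving only in the unoptimized build, which is exactly the debuggability the patch is after.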