When we fold to and, use fold_and, which sets s_mask and a_mask more intelligently than we do for the rest of deposit.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 424ed7674d..4d5fa04199 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1620,6 +1620,7 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
 {
     TCGOpcode and_opc;
+    uint64_t z_mask;
 
     if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
         uint64_t t1 = arg_info(op->args[1])->val;
@@ -1647,8 +1648,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         op->opc = and_opc;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
     /* Inserting zero into a value. */
@@ -1657,14 +1657,12 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
 
         op->opc = and_opc;
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
-    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                            op->args[3], op->args[4],
-                            arg_info(op->args[2])->z_mask);
-    return false;
+    z_mask = deposit64(arg_info(op->args[1])->z_mask, op->args[3], op->args[4],
+                       arg_info(op->args[2])->z_mask);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
-- 
2.43.0