2016-07-11  Marek Polacek  <pola...@redhat.com>

        PR c/7652
        * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Add
        gcc_fallthrough.
        * config/aarch64/aarch64-simd.md: Likewise.
        * config/aarch64/aarch64.c (aarch64_expand_mov_immediate): Likewise.
        (aarch64_print_operand): Likewise.
        (aarch64_rtx_costs): Likewise.
        (aarch64_expand_compare_and_swap): Likewise.
        (aarch64_gen_atomic_ldop): Likewise.
        (aarch64_split_atomic_op): Likewise.
        (aarch64_expand_vec_perm_const): Likewise.
        * config/aarch64/predicates.md: Likewise.
        * config/arm/arm-builtins.c (arm_expand_neon_args): Likewise.
        * config/arm/arm.c (const_ok_for_op): Likewise.
        (arm_rtx_costs_1): Likewise.
        (thumb1_size_rtx_costs): Likewise.
        (arm_size_rtx_costs): Likewise.
        (arm_new_rtx_costs): Likewise.
        (thumb2_reorg): Likewise.
        (output_move_double): Likewise.
        (output_move_neon): Likewise.
        (arm_print_operand): Likewise.
        (arm_expand_compare_and_swap): Likewise.
        (arm_split_atomic_op): Likewise.
        (arm_expand_vec_perm_const): Likewise.
        * config/arm/neon.md: Likewise.
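
This patch annotates the intentional switch fall-throughs in the ARM and
AArch64 back ends with gcc_fallthrough (), so they will not trip the
-Wimplicit-fallthrough warning being added for PR c/7652.  For reviewers
who want the pattern in isolation, here is a minimal, self-contained
sketch.  It is illustrative only, not part of the patch, and the macro
definition shown is an assumption modeled on gcc/system.h (the statement
attribute where the host compiler understands it, a no-op otherwise):

/* Illustrative sketch, not part of the patch.  Assumed definition,
   modeled on gcc/system.h.  */
#if defined (__GNUC__) && __GNUC__ >= 7
# define gcc_fallthrough() __attribute__ ((fallthrough))
#else
# define gcc_fallthrough()
#endif

/* The pattern applied throughout the patch: the comment documents the
   intent for readers, the macro documents it for the compiler.  */
static int
cost_of (int code)
{
  int cost = 0;
  switch (code)
    {
    case 0:
      cost += 2;
      /* Fall through.  */
      gcc_fallthrough ();
    case 1:
      cost += 1;
      break;
    default:
      cost = -1;
      break;
    }
  return cost;
}

In each hunk the call goes immediately after the existing
/* Fall through.  */ comment, keeping the human-readable note alongside
the compiler-visible one.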

diff --git gcc/gcc/config/aarch64/aarch64-builtins.c gcc/gcc/config/aarch64/aarch64-builtins.c
index 6b90b2a..fe37ea2 100644
--- gcc/gcc/config/aarch64/aarch64-builtins.c
+++ gcc/gcc/config/aarch64/aarch64-builtins.c
@@ -999,6 +999,7 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval,
                }
              /* Fall through - if the lane index isn't a constant then
                 the next case will error.  */
+             gcc_fallthrough ();
            case SIMD_ARG_CONSTANT:
 constant_arg:
              if (!(*insn_data[icode].operand[opc].predicate)
diff --git gcc/gcc/config/aarch64/aarch64-simd.md gcc/gcc/config/aarch64/aarch64-simd.md
index a19d171..110a070 100644
--- gcc/gcc/config/aarch64/aarch64-simd.md
+++ gcc/gcc/config/aarch64/aarch64-simd.md
@@ -2328,6 +2328,7 @@
       if (operands[5] == CONST0_RTX (<MODE>mode))
         break;
       /* Fall through, as may need to load into register.  */
+      gcc_fallthrough ();
     default:
       if (!REG_P (operands[5]))
         operands[5] = force_reg (<MODE>mode, operands[5]);
@@ -2430,6 +2431,7 @@
          break;
        }
       /* Fall through.  */
+      gcc_fallthrough ();
     default:
       if (!REG_P (operands[5]))
        operands[5] = force_reg (<VDQF:MODE>mode, operands[5]);
@@ -2441,6 +2443,7 @@
     case UNLT:
       inverse = 1;
       /* Fall through.  */
+      gcc_fallthrough ();
     case GE:
     case UNGE:
     case ORDERED:
@@ -2452,6 +2455,7 @@
     case UNLE:
       inverse = 1;
       /* Fall through.  */
+      gcc_fallthrough ();
     case GT:
     case UNGT:
       base_comparison = gen_aarch64_cmgt<VDQF:mode>;
@@ -2545,6 +2549,7 @@
         Swapping the operands to BSL will give the UNORDERED case.  */
      swap_bsl_operands = 1;
      /* Fall through.  */
+     gcc_fallthrough ();
     case ORDERED:
       emit_insn (gen_aarch64_cmgt<VDQF:mode> (tmp, operands[4], operands[5]));
       emit_insn (gen_aarch64_cmge<VDQF:mode> (mask, operands[5], operands[4]));
diff --git gcc/gcc/config/aarch64/aarch64.c gcc/gcc/config/aarch64/aarch64.c
index 512ef10..3ecf244 100644
--- gcc/gcc/config/aarch64/aarch64.c
+++ gcc/gcc/config/aarch64/aarch64.c
@@ -1833,6 +1833,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
              return;
            }
          /* FALLTHRU */
+         gcc_fallthrough ();
 
        case SYMBOL_SMALL_ABSOLUTE:
        case SYMBOL_TINY_ABSOLUTE:
@@ -4541,6 +4542,7 @@ aarch64_print_operand (FILE *f, rtx x, int code)
              break;
            }
          /* Fall through.  */
+         gcc_fallthrough ();
 
        default:
          output_operand_lossage ("Unsupported operand for code '%c'", code);
@@ -4713,6 +4715,7 @@ aarch64_print_operand (FILE *f, rtx x, int code)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
 
     case 0:
       /* Print a normal operand, if it's a general register, then we
@@ -6192,6 +6195,7 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
            *cost += rtx_cost (SUBREG_REG (op0), VOIDmode, SET, 0, speed);
 
          /* Fall through.  */
+         gcc_fallthrough ();
        case REG:
          /* The cost is one per vector-register copied.  */
          if (VECTOR_MODE_P (GET_MODE (op0)) && REG_P (op1))
@@ -6685,6 +6689,7 @@ cost_plus:
           return true;
         }
     /* Fall through.  */
+    gcc_fallthrough ();
     case XOR:
     case AND:
     cost_logic:
@@ -7081,6 +7086,7 @@ cost_plus:
        }
 
     /* Fall-through.  */
+    gcc_fallthrough ();
     case UMOD:
       if (speed)
        {
@@ -7356,6 +7362,7 @@ cost_plus:
         }
 
       /* Fall through.  */
+      gcc_fallthrough ();
     default:
       break;
     }
@@ -11422,6 +11429,7 @@ aarch64_expand_compare_and_swap (rtx operands[])
       rval = gen_reg_rtx (SImode);
       oldval = convert_modes (SImode, mode, oldval, true);
       /* Fall through.  */
+      gcc_fallthrough ();
 
     case SImode:
     case DImode:
@@ -11777,6 +11785,7 @@ aarch64_gen_atomic_ldop (enum rtx_code code, rtx out_data, rtx out_result,
          src = gen_lowpart (mode, src);
       }
       /* Fall-through.  */
+      gcc_fallthrough ();
     case PLUS:
       ldop_code = AARCH64_LDOP_PLUS;
       break;
@@ -11904,6 +11913,7 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
          code = PLUS;
        }
       /* Fall through.  */
+      gcc_fallthrough ();
 
     default:
       x = gen_rtx_fmt_ee (code, wmode, old_out, value);
@@ -12817,6 +12827,7 @@ aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
         of the permutation by folding the permutation into the single
         input vector.  */
       /* Fall Through.  */
+      gcc_fallthrough ();
     case 2:
       for (i = 0; i < nelt; ++i)
        d.perm[i] &= nelt - 1;
diff --git gcc/gcc/config/aarch64/predicates.md gcc/gcc/config/aarch64/predicates.md
index 8f2726d..9a7b336 100644
--- gcc/gcc/config/aarch64/predicates.md
+++ gcc/gcc/config/aarch64/predicates.md
@@ -180,6 +180,7 @@
          || GET_CODE (XEXP (op, 1)) != CONST_INT)
        return false;
       op = XEXP (op, 0);
+      gcc_fallthrough ();
 
     case SYMBOL_REF:
       return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC;
@@ -201,6 +202,7 @@
          || GET_CODE (XEXP (op, 1)) != CONST_INT)
        return false;
       op = XEXP (op, 0);
+      gcc_fallthrough ();
 
     case SYMBOL_REF:
       return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC;
diff --git gcc/gcc/config/arm/arm-builtins.c gcc/gcc/config/arm/arm-builtins.c
index 68b2839..3b03c29 100644
--- gcc/gcc/config/arm/arm-builtins.c
+++ gcc/gcc/config/arm/arm-builtins.c
@@ -2113,6 +2113,7 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
                }
              /* Fall through - if the lane index isn't a constant then
                 the next case will error.  */
+             gcc_fallthrough ();
 
            case NEON_ARG_CONSTANT:
 constant_arg:
diff --git gcc/gcc/config/arm/arm.c gcc/gcc/config/arm/arm.c
index 580662d..e5f8208 100644
--- gcc/gcc/config/arm/arm.c
+++ gcc/gcc/config/arm/arm.c
@@ -3956,6 +3956,7 @@ const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
              || ((-i) & 0xfffff000) == 0))
        return 1;
       /* else fall through.  */
+      gcc_fallthrough ();
 
     case COMPARE:
     case EQ:
@@ -8469,6 +8470,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        *total = rtx_cost (XEXP (x, 1), mode, code, 1, speed);
 
       /* Fall through */
+      gcc_fallthrough ();
     case ROTATERT:
       if (mode != SImode)
        {
@@ -8477,6 +8479,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
     case ASHIFT: case LSHIFTRT: case ASHIFTRT:
       *total += rtx_cost (XEXP (x, 0), mode, code, 0, speed);
       if (mode == DImode)
@@ -8590,6 +8593,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
 
     case PLUS:
       if (code == PLUS && arm_arch6 && mode == SImode
@@ -8646,6 +8650,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
 
     case AND: case XOR: case IOR:
 
@@ -8741,6 +8746,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
     case NOT:
       *total = COSTS_N_INSNS (ARM_NUM_REGS(mode));
       if (mode == SImode && code == NOT)
@@ -8837,6 +8843,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
        }
 
       /* Fall through */
+      gcc_fallthrough ();
     case COMPARE:
       if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM)
        {
@@ -9076,6 +9083,7 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
              && power_of_two_operand (XEXP (XEXP (x, 1), 1), SImode)))
        return COSTS_N_INSNS (2);
       /* On purpose fall through for normal RTX.  */
+      gcc_fallthrough ();
     case COMPARE:
     case NEG:
     case NOT:
@@ -9243,6 +9251,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
          return true;
        }
       /* Fall through */
+      gcc_fallthrough ();
     case ROTATERT:
     case ASHIFT:
     case LSHIFTRT:
@@ -9317,6 +9326,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
        }
 
       /* Fall through */
+      gcc_fallthrough ();
     case AND: case XOR: case IOR:
       if (mode == SImode)
        {
@@ -9348,6 +9358,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
        }
 
       /* Fall through */
+      gcc_fallthrough ();
     case NOT:
       *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
 
@@ -9721,6 +9732,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
        }
 
     /* Fall-through.  */
+    gcc_fallthrough ();
     case UMOD:
       *cost = LIBCALL_COST (2);
       return false;    /* All arguments must be in registers.  */
@@ -9735,6 +9747,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
          return true;
        }
       /* Fall through */
+      gcc_fallthrough ();
     case ROTATERT:
     case ASHIFT:
     case LSHIFTRT:
@@ -10244,6 +10257,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
           return true;
         }
     /* Fall through.  */
+    gcc_fallthrough ();
     case AND: case XOR:
       if (mode == SImode)
        {
@@ -11035,6 +11049,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
          return true;
        }
       /* Fall through.  */
+      gcc_fallthrough ();
     case SMAX:
     case UMIN:
     case UMAX:
@@ -11076,6 +11091,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
     case ZERO_EXTRACT:
       /* TODO: Simple zero_extract of bottom bits using AND.  */
       /* Fall through.  */
+      gcc_fallthrough ();
     case SIGN_EXTRACT:
       if (arm_arch6
          && mode == SImode
@@ -17506,6 +17522,7 @@ thumb2_reorg (void)
                      if (!optimize_size)
                        break;
                      /* else fall through.  */
+                     gcc_fallthrough ();
                    case AND:
                    case IOR:
                    case XOR:
@@ -18544,6 +18561,7 @@ output_move_double (rtx *operands, bool emit, int *count)
              return "";
            }
          /* Fall through */
+         gcc_fallthrough ();
 
         default:
          otherops[0] = adjust_address (operands[0], SImode, 4);
@@ -18811,6 +18829,7 @@ output_move_neon (rtx *operands)
          break;
        }
       /* Fall through.  */
+      gcc_fallthrough ();
     case LABEL_REF:
     case PLUS:
       {
@@ -21992,6 +22011,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
              break;
            }
          /* Fall through.  */
+         gcc_fallthrough ();
 
        default:
          output_operand_lossage ("Unsupported operand for code '%c'", code);
@@ -28107,6 +28127,7 @@ arm_expand_compare_and_swap (rtx operands[])
       rval = gen_reg_rtx (SImode);
       oldval = convert_modes (SImode, mode, oldval, true);
       /* FALLTHRU */
+      gcc_fallthrough ();
 
     case SImode:
       /* Force the value into a register if needed.  We waited until after
@@ -28298,6 +28319,7 @@ arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
          code = PLUS;
        }
       /* FALLTHRU */
+      gcc_fallthrough ();
 
     case PLUS:
       if (mode == DImode)
@@ -28317,6 +28339,7 @@ arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
          break;
        }
       /* FALLTHRU */
+      gcc_fallthrough ();
 
     default:
       x = gen_rtx_fmt_ee (code, wmode, old_out, value);
@@ -28904,6 +28927,7 @@ arm_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
         of the permutation by folding the permutation into the single
         input vector.  */
       /* FALLTHRU */
+      gcc_fallthrough ();
     case 2:
       for (i = 0; i < nelt; ++i)
         d.perm[i] &= nelt - 1;
diff --git gcc/gcc/config/arm/neon.md gcc/gcc/config/arm/neon.md
index e2fdfbb..6488cdc 100644
--- gcc/gcc/config/arm/neon.md
+++ gcc/gcc/config/arm/neon.md
@@ -1731,6 +1731,7 @@
          break;
        }
       /* Fall through.  */
+      gcc_fallthrough ();
     default:
       if (!REG_P (operands[5]))
        operands[5] = force_reg (<MODE>mode, operands[5]);
@@ -1742,6 +1743,7 @@
     case UNLT:
       inverse = 1;
       /* Fall through.  */
+      gcc_fallthrough ();
     case GE:
     case UNGE:
     case ORDERED:
@@ -1753,6 +1755,7 @@
     case UNLE:
       inverse = 1;
       /* Fall through.  */
+      gcc_fallthrough ();
     case GT:
     case UNGT:
       base_comparison = gen_neon_vcgt<mode>;
@@ -1846,6 +1849,7 @@
         Swapping the operands to BSL will give the UNORDERED case.  */
      swap_bsl_operands = 1;
      /* Fall through.  */
+     gcc_fallthrough ();
     case ORDERED:
       emit_insn (gen_neon_vcgt<mode> (tmp, operands[4], operands[5]));
       emit_insn (gen_neon_vcge<mode> (mask, operands[5], operands[4]));
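
For reference, the effect can be checked with the new warning
(hypothetical file name; assumes a compiler that already carries the
-Wimplicit-fallthrough work from this PR):

  gcc -c -Wimplicit-fallthrough demo.c

An unannotated fall-through draws a "this statement may fall through"
diagnostic; gcc_fallthrough () records the intent in the token stream
itself, so it keeps working where comment recognition does not reach,
e.g. through macro expansion.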
