Re: Implement -Wswitch-fallthrough: aarch64 + arm

2016-07-14 Thread Richard Earnshaw (lists)
Where the comments just say "Fall through", or equivalent, and there's
no other explanation, I think those comments are now redundant and should
be removed.

So remove:

   /* Fall through.  */

but keep things like:

  /* Fall through - if the lane index isn't a constant then
     the next case will error.  */
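
Taking two hunks from the patch below, a sketch of the combined result
(not itself part of the posted patch): a bare fall-through becomes just
the marker,

      inverse = 1;
      gcc_fallthrough ();
    case GE:

while an explanatory comment stays alongside it:

      /* Fall through - if the lane index isn't a constant then
         the next case will error.  */
      gcc_fallthrough ();
    case SIMD_ARG_CONSTANT: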

OK with that change.

R.

On 11/07/16 20:53, Marek Polacek wrote:
> 2016-07-11  Marek Polacek  
> 
>   PR c/7652
>   * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Add 
> gcc_fallthrough.
>   * config/aarch64/aarch64-simd.md: Likewise.
>   * config/aarch64/aarch64.c (aarch64_expand_mov_immediate): Likewise.
>   (aarch64_print_operand): Likewise.
>   (aarch64_rtx_costs): Likewise.
>   (aarch64_expand_compare_and_swap): Likewise.
>   (aarch64_gen_atomic_ldop): Likewise.
>   (aarch64_split_atomic_op): Likewise.
>   (aarch64_expand_vec_perm_const): Likewise.
>   * config/aarch64/predicates.md: Likewise.
>   * config/arm/arm-builtins.c (arm_expand_neon_args): Likewise.
>   * config/arm/arm.c (const_ok_for_op): Likewise.
>   (arm_rtx_costs_1): Likewise.
>   (thumb1_size_rtx_costs): Likewise.
>   (arm_size_rtx_costs): Likewise.
>   (arm_new_rtx_costs): Likewise.
>   (thumb2_reorg): Likewise.
>   (output_move_double): Likewise.
>   (output_move_neon): Likewise.
>   (arm_print_operand): Likewise.
>   (arm_expand_compare_and_swap): Likewise.
>   (arm_split_atomic_op): Likewise.
>   (arm_expand_vec_perm_const): Likewise.
>   * config/arm/neon.md: Likewise.
> 
> [patch body snipped -- the full diff is in the original message below.]

Re: Implement -Wswitch-fallthrough: aarch64 + arm

2016-07-11 Thread Marek Polacek
2016-07-11  Marek Polacek  

PR c/7652
* config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Add 
gcc_fallthrough.
* config/aarch64/aarch64-simd.md: Likewise.
* config/aarch64/aarch64.c (aarch64_expand_mov_immediate): Likewise.
(aarch64_print_operand): Likewise.
(aarch64_rtx_costs): Likewise.
(aarch64_expand_compare_and_swap): Likewise.
(aarch64_gen_atomic_ldop): Likewise.
(aarch64_split_atomic_op): Likewise.
(aarch64_expand_vec_perm_const): Likewise.
* config/aarch64/predicates.md: Likewise.
* config/arm/arm-builtins.c (arm_expand_neon_args): Likewise.
* config/arm/arm.c (const_ok_for_op): Likewise.
(arm_rtx_costs_1): Likewise.
(thumb1_size_rtx_costs): Likewise.
(arm_size_rtx_costs): Likewise.
(arm_new_rtx_costs): Likewise.
(thumb2_reorg): Likewise.
(output_move_double): Likewise.
(output_move_neon): Likewise.
(arm_print_operand): Likewise.
(arm_expand_compare_and_swap): Likewise.
(arm_split_atomic_op): Likewise.
(arm_expand_vec_perm_const): Likewise.
* config/arm/neon.md: Likewise.
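
For readers outside the GCC tree: gcc_fallthrough is a GCC-internal
macro (its real definition lives in gcc/system.h and is not shown in
this thread).  A minimal sketch of the usual shape of such a marker
macro, assuming a host compiler that understands the fallthrough
attribute:

  /* Hypothetical sketch -- see gcc/system.h for the real definition.
     Expand to the fallthrough attribute where the host compiler
     supports it, and to nothing otherwise, so the marker costs
     nothing on older hosts.  */
  #if defined __has_attribute
  # if __has_attribute (fallthrough)
  #  define gcc_fallthrough() __attribute__ ((fallthrough))
  # endif
  #endif
  #ifndef gcc_fallthrough
  # define gcc_fallthrough() /* nothing */
  #endif

Writing gcc_fallthrough (); as the last statement before the next case
label then tells -Wswitch-fallthrough that the fall-through is
intentional.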

diff --git gcc/gcc/config/aarch64/aarch64-builtins.c gcc/gcc/config/aarch64/aarch64-builtins.c
index 6b90b2a..fe37ea2 100644
--- gcc/gcc/config/aarch64/aarch64-builtins.c
+++ gcc/gcc/config/aarch64/aarch64-builtins.c
@@ -999,6 +999,7 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval,
}
  /* Fall through - if the lane index isn't a constant then
 the next case will error.  */
+ gcc_fallthrough ();
case SIMD_ARG_CONSTANT:
 constant_arg:
  if (!(*insn_data[icode].operand[opc].predicate)
diff --git gcc/gcc/config/aarch64/aarch64-simd.md gcc/gcc/config/aarch64/aarch64-simd.md
index a19d171..110a070 100644
--- gcc/gcc/config/aarch64/aarch64-simd.md
+++ gcc/gcc/config/aarch64/aarch64-simd.md
@@ -2328,6 +2328,7 @@
   if (operands[5] == CONST0_RTX (mode))
 break;
   /* Fall through, as may need to load into register.  */
+  gcc_fallthrough ();
 default:
   if (!REG_P (operands[5]))
 operands[5] = force_reg (mode, operands[5]);
@@ -2430,6 +2431,7 @@
  break;
}
   /* Fall through.  */
+  gcc_fallthrough ();
 default:
   if (!REG_P (operands[5]))
operands[5] = force_reg (mode, operands[5]);
@@ -2441,6 +2443,7 @@
 case UNLT:
   inverse = 1;
   /* Fall through.  */
+  gcc_fallthrough ();
 case GE:
 case UNGE:
 case ORDERED:
@@ -2452,6 +2455,7 @@
 case UNLE:
   inverse = 1;
   /* Fall through.  */
+  gcc_fallthrough ();
 case GT:
 case UNGT:
   base_comparison = gen_aarch64_cmgt;
@@ -2545,6 +2549,7 @@
 Swapping the operands to BSL will give the UNORDERED case.  */
  swap_bsl_operands = 1;
  /* Fall through.  */
+ gcc_fallthrough ();
 case ORDERED:
   emit_insn (gen_aarch64_cmgt (tmp, operands[4], operands[5]));
   emit_insn (gen_aarch64_cmge (mask, operands[5], operands[4]));
diff --git gcc/gcc/config/aarch64/aarch64.c gcc/gcc/config/aarch64/aarch64.c
index 512ef10..3ecf244 100644
--- gcc/gcc/config/aarch64/aarch64.c
+++ gcc/gcc/config/aarch64/aarch64.c
@@ -1833,6 +1833,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  return;
}
  /* FALLTHRU */
+ gcc_fallthrough ();
 
case SYMBOL_SMALL_ABSOLUTE:
case SYMBOL_TINY_ABSOLUTE:
@@ -4541,6 +4542,7 @@ aarch64_print_operand (FILE *f, rtx x, int code)
  break;
}
  /* Fall through.  */
+ gcc_fallthrough ();
 
default:
  output_operand_lossage ("Unsupported operand for code '%c'", code);
@@ -4713,6 +4715,7 @@ aarch64_print_operand (FILE *f, rtx x, int code)
}
 
   /* Fall through */
+  gcc_fallthrough ();
 
 case 0:
   /* Print a normal operand, if it's a general register, then we
@@ -6192,6 +6195,7 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
*cost += rtx_cost (SUBREG_REG (op0), VOIDmode, SET, 0, speed);
 
  /* Fall through.  */
+ gcc_fallthrough ();
case REG:
  /* The cost is one per vector-register copied.  */
  if (VECTOR_MODE_P (GET_MODE (op0)) && REG_P (op1))
@@ -6685,6 +6689,7 @@ cost_plus:
   return true;
 }
 /* Fall through.  */
+gcc_fallthrough ();
 case XOR:
 case AND:
 cost_logic:
@@ -7081,6 +7086,7 @@ cost_plus:
}
 
 /* Fall-through.  */
+gcc_fallthrough ();
 case UMOD:
   if (speed)
{
@@ -7356,6 +7362,7 @@ cost_plus:
 }
 
   /* Fall through.  */
+  gcc_fallthrough ();
 default:
   break;
 }
@@ -11422,6 +11429,7 @@ aarch64_expand_compare_and_swap (rtx operands[])
   rval =