Hello!
... and some unrelated cleanups that simplify a couple of
switch statements.
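
For reference, the switch simplification just leans on C fall-through:
each stricter mode tests the packed-single tuning flag and, when
packed-single insns are preferred, falls through to the more general
V4SF case. A minimal standalone sketch of the pattern follows
(illustrative only -- the mode names and the flag mirror the names used
in the patch, but the function itself is made up and is not the actual
i386.c code):

  /* Illustrative sketch, not the real backend code: pick a "zero the
     register" opcode, falling through to the packed-single variant
     when that encoding is preferred.  */
  enum attr_mode { MODE_TI, MODE_V2DF, MODE_V4SF };

  /* Stands in for TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL.  */
  static int packed_single_optimal;

  static const char *
  zero_opcode (enum attr_mode mode)
  {
    switch (mode)
      {
      case MODE_TI:
        if (!packed_single_optimal)
          return "%vpxor\t%0, %d0";
        /* FALLTHRU */
      case MODE_V2DF:
        if (!packed_single_optimal)
          return "%vxorpd\t%0, %d0";
        /* FALLTHRU */
      case MODE_V4SF:
        return "%vxorps\t%0, %d0";
      }
    return "";  /* Not reached for the three modes above.  */
  }
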
2011-06-02 Uros Bizjak <[email protected]>
* config/i386/i386.c (standard_sse_constant_opcode) <case 1>:
Simplify switch statement.
* config/i386/i386.md (*movdf_internal_rex64) <case 8,9,10>: Ditto.
(*movdf_internal) <case 6,7,8>: Ditto.
* config/i386/constraints.md (Y4): New constraint.
* config/i386/sse.md (vec_set<mode>_0): Merge with
*vec_set<mode>_0_sse4_1 and *vec_set<mode>_0_sse2.
(*vec_extractv2di_1): Merge from *vec_extractv2di_1_sse2 and
*vec_extractv2di_1_sse.
(*vec_concatv2di_rex64): Merge from *vec_concatv2di_rex64_sse4_1
and *vec_concatv2di_rex64_sse.
testsuite/ChangeLog:
2011-06-02 Uros Bizjak <[email protected]>
* gcc.target/i386/sse2-init-v2di-2.c: Update scan-assembler-times string.
Bootstrapped and regression tested on x86_64-pc-linux-gnu, committed
to mainline SVN.
Uros.
Index: testsuite/gcc.target/i386/sse2-init-v2di-2.c
===================================================================
--- testsuite/gcc.target/i386/sse2-init-v2di-2.c (revision 174566)
+++ testsuite/gcc.target/i386/sse2-init-v2di-2.c (working copy)
@@ -10,4 +10,4 @@ test (long long b)
return _mm_cvtsi64_si128 (b);
}
-/* { dg-final { scan-assembler-times "\\*vec_concatv2di_rex64_sse4_1/4" 1 } } */
+/* { dg-final { scan-assembler-times "\\*vec_concatv2di_rex64/4" 1 } } */
Index: config/i386/i386.md
===================================================================
--- config/i386/i386.md (revision 174566)
+++ config/i386/i386.md (working copy)
@@ -2956,18 +2956,15 @@
case 10:
switch (get_attr_mode (insn))
{
- case MODE_V4SF:
- return "%vmovaps\t{%1, %0|%0, %1}";
- case MODE_V2DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
- else
- return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_TI:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
- else
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "%vmovdqa\t{%1, %0|%0, %1}";
+ case MODE_V2DF:
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
+ return "%vmovapd\t{%1, %0|%0, %1}";
+ case MODE_V4SF:
+ return "%vmovaps\t{%1, %0|%0, %1}";
+
case MODE_DI:
return "%vmovq\t{%1, %0|%0, %1}";
case MODE_DF:
@@ -3102,18 +3099,15 @@
case 8:
switch (get_attr_mode (insn))
{
- case MODE_V4SF:
- return "%vmovaps\t{%1, %0|%0, %1}";
- case MODE_V2DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
- else
- return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_TI:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
- else
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "%vmovdqa\t{%1, %0|%0, %1}";
+ case MODE_V2DF:
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
+ return "%vmovapd\t{%1, %0|%0, %1}";
+ case MODE_V4SF:
+ return "%vmovaps\t{%1, %0|%0, %1}";
+
case MODE_DI:
return "%vmovq\t{%1, %0|%0, %1}";
case MODE_DF:
Index: config/i386/constraints.md
===================================================================
--- config/i386/constraints.md (revision 174566)
+++ config/i386/constraints.md (working copy)
@@ -99,6 +99,9 @@
(define_register_constraint "Y2" "TARGET_SSE2 ? SSE_REGS : NO_REGS"
"@internal Any SSE register, when SSE2 is enabled.")
+(define_register_constraint "Y4" "TARGET_SSE4_1 ? SSE_REGS : NO_REGS"
+ "@internal Any SSE register, when SSE4_1 is enabled.")
+
(define_register_constraint "Yi"
"TARGET_SSE2 && TARGET_INTER_UNIT_MOVES ? SSE_REGS : NO_REGS"
"@internal Any SSE register, when SSE2 and inter-unit moves are enabled.")
Index: config/i386/sse.md
===================================================================
--- config/i386/sse.md (revision 174566)
+++ config/i386/sse.md (working copy)
@@ -3376,79 +3376,35 @@
;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set<mode>_0_sse4_1"
+(define_insn "vec_set<mode>_0"
[(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x,x ,x,x,x ,x ,m,m,m")
+ "=Y4,Y2,Y2,x,x,x,Y4 ,x ,m,m,m")
(vec_merge:VI4F_128
(vec_duplicate:VI4F_128
(match_operand:<ssescalarmode> 2 "general_operand"
- " x,m,*r,x,x,*rm,*rm,x,*r,fF"))
+ " Y4,m ,*r,m,x,x,*rm,*rm,x,*r,fF"))
(match_operand:VI4F_128 1 "vector_move_operand"
- " C,C,C ,0,x,0 ,x ,0,0 ,0")
+ " C ,C ,C ,C,0,x,0 ,x ,0,0 ,0")
(const_int 1)))]
- "TARGET_SSE4_1"
+ "TARGET_SSE"
"@
%vinsertps\t{$0xe, %d2, %0|%0, %d2, 0xe}
%vmov<ssescalarmodesuffix>\t{%2, %0|%0, %2}
%vmovd\t{%2, %0|%0, %2}
movss\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}
vmovss\t{%2, %1, %0|%0, %1, %2}
pinsrd\t{$0, %2, %0|%0, %2, 0}
vpinsrd\t{$0, %2, %1, %0|%0, %1, %2, 0}
#
#
#"
- [(set_attr "isa" "base,base,base,noavx,avx,noavx,avx,base,base,base")
- (set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov,sselog,sselog,*,*,*")
- (set_attr "prefix_extra" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "length_immediate" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,orig,vex,orig,vex,*,*,*")
- (set_attr "mode" "SF,<ssescalarmode>,SI,SF,SF,TI,TI,*,*,*")])
-
-;; Avoid combining registers from different units in a single alternative,
-;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set<mode>_0_sse2"
- [(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x ,x,m,m ,m")
- (vec_merge:VI4F_128
- (vec_duplicate:VI4F_128
- (match_operand:<ssescalarmode> 2 "general_operand"
- " m,*r,x,x,*r,fF"))
- (match_operand:VI4F_128 1 "vector_move_operand"
- " C, C,0,0,0 ,0")
- (const_int 1)))]
- "TARGET_SSE2"
- "@
- mov<ssescalarmodesuffix>\t{%2, %0|%0, %2}
- movd\t{%2, %0|%0, %2}
- movss\t{%2, %0|%0, %2}
- #
- #
- #"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "<ssescalarmode>,SI,SF,*,*,*")])
-
-;; Avoid combining registers from different units in a single alternative,
-;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "vec_set<mode>_0"
- [(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x,m,m ,m")
- (vec_merge:VI4F_128
- (vec_duplicate:VI4F_128
- (match_operand:<ssescalarmode> 2 "general_operand"
- " m,x,x,*r,fF"))
- (match_operand:VI4F_128 1 "vector_move_operand"
- " C,0,0,0 ,0")
- (const_int 1)))]
- "TARGET_SSE"
- "@
- movss\t{%2, %0|%0, %2}
- movss\t{%2, %0|%0, %2}
- #
- #
- #"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "SF,SF,*,*,*")])
+ [(set_attr "isa" "base,base,base,noavx,noavx,avx,noavx,avx,base,base,base")
+ (set_attr "type"
"sselog,ssemov,ssemov,ssemov,ssemov,ssemov,sselog,sselog,*,*,*")
+ (set_attr "prefix_extra" "*,*,*,*,*,*,1,1,*,*,*")
+ (set_attr "length_immediate" "*,*,*,*,*,*,1,1,*,*,*")
+ (set_attr "prefix"
"maybe_vex,maybe_vex,maybe_vex,orig,orig,vex,orig,vex,*,*,*")
+ (set_attr "mode" "SF,<ssescalarmode>,SI,SF,SF,SF,TI,TI,*,*,*")])
;; A subset is vec_setv4sf.
(define_insn "*vec_setv4sf_sse4_1"
@@ -6214,7 +6170,7 @@
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
-;; It must come before *vec_extractv2di_1_sse since it is preferred.
+;; It must come before *vec_extractv2di_1_rex64 since it is preferred.
(define_insn "*sse4_1_pextrq"
[(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
(vec_select:DI
@@ -6478,39 +6434,26 @@
(set_attr "prefix" "maybe_vex,orig,vex,maybe_vex,orig")
(set_attr "mode" "V2SF,TI,TI,TI,DI")])
-(define_insn "*vec_extractv2di_1_sse2"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x,x")
+(define_insn "*vec_extractv2di_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,Y2,Y2,Y2,x,x")
(vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" " x,0,x,o")
+ (match_operand:V2DI 1 "nonimmediate_operand" " x,0 ,Y2,o ,x,o")
(parallel [(const_int 1)])))]
- "!TARGET_64BIT
- && TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "!TARGET_64BIT && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
%vmovhps\t{%1, %0|%0, %1}
psrldq\t{$8, %0|%0, 8}
vpsrldq\t{$8, %1, %0|%0, %1, 8}
- %vmovq\t{%H1, %0|%0, %H1}"
- [(set_attr "isa" "base,noavx,avx,base")
- (set_attr "type" "ssemov,sseishft1,sseishft1,ssemov")
- (set_attr "length_immediate" "*,1,1,*")
- (set_attr "memory" "*,none,none,*")
- (set_attr "prefix" "maybe_vex,orig,vex,maybe_vex")
- (set_attr "mode" "V2SF,TI,TI,TI")])
-
-;; Not sure this is ever used, but it doesn't hurt to have it. -aoliva
-(define_insn "*vec_extractv2di_1_sse"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x")
- (vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" " x,x,o")
- (parallel [(const_int 1)])))]
- "!TARGET_SSE2 && TARGET_SSE
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "@
- movhps\t{%1, %0|%0, %1}
+ %vmovq\t{%H1, %0|%0, %H1}
movhlps\t{%1, %0|%0, %1}
movlps\t{%H1, %0|%0, %H1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "V2SF,V4SF,V2SF")])
+ [(set_attr "isa" "base,noavx,avx,base,noavx,noavx")
+ (set_attr "type" "ssemov,sseishft1,sseishft1,ssemov,ssemov,ssemov")
+ (set_attr "length_immediate" "*,1,1,*,*,*")
+ (set_attr "memory" "*,none,none,*,*,*")
+ (set_attr "prefix" "maybe_vex,orig,vex,maybe_vex,orig,orig")
+ (set_attr "mode" "V2SF,TI,TI,TI,V4SF,V2SF")])
(define_insn "*vec_dupv4si_avx"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
@@ -6570,8 +6513,8 @@
(match_operand:SI 2 "vector_move_operand" "rm,rm,x,x, C,*ym, C")))]
"TARGET_SSE4_1"
"@
- pinsrd\t{$0x1, %2, %0|%0, %2, 0x1}
- vpinsrd\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
+ pinsrd\t{$1, %2, %0|%0, %2, 1}
+ vpinsrd\t{$1, %2, %1, %0|%0, %1, %2, 1}
punpckldq\t{%2, %0|%0, %2}
vpunpckldq\t{%2, %1, %0|%0, %1, %2}
%vmovd\t{%1, %0|%0, %1}
@@ -6633,18 +6576,18 @@
(set_attr "mode" "TI,TI,V4SF,V2SF,V2SF")])
;; movd instead of movq is required to handle broken assemblers.
-(define_insn "*vec_concatv2di_rex64_sse4_1"
+(define_insn "*vec_concatv2di_rex64"
[(set (match_operand:V2DI 0 "register_operand"
- "=x, x, x,Yi,!x,x,x,x,x")
+ "=Y4,x ,x ,Yi,!x,x,x,x,x")
(vec_concat:V2DI
(match_operand:DI 1 "nonimmediate_operand"
- " 0, x,xm,r ,*y,0,x,0,x")
+ " 0 ,x ,xm,r ,*y,0,x,0,x")
(match_operand:DI 2 "vector_move_operand"
- "rm,rm, C,C ,C ,x,x,m,m")))]
- "TARGET_64BIT && TARGET_SSE4_1"
+ " rm,rm,C ,C ,C ,x,x,m,m")))]
+ "TARGET_64BIT"
"@
- pinsrq\t{$0x1, %2, %0|%0, %2, 0x1}
- vpinsrq\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
+ pinsrq\t{$1, %2, %0|%0, %2, 1}
+ vpinsrq\t{$1, %2, %1, %0|%0, %1, %2, 1}
%vmovq\t{%1, %0|%0, %1}
%vmovd\t{%1, %0|%0, %1}
movq2dq\t{%1, %0|%0, %1}
@@ -6653,7 +6596,11 @@
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx,base,base,base,noavx,avx,noavx,avx")
- (set_attr "type"
"sselog,sselog,ssemov,ssemov,ssemov,sselog,sselog,ssemov,ssemov")
+ (set (attr "type")
+ (if_then_else
+ (eq_attr "alternative" "0,1,5,6")
+ (const_string "sselog")
+ (const_string "ssemov")))
(set (attr "prefix_rex")
(if_then_else
(and (eq_attr "alternative" "0,3")
@@ -6665,24 +6612,6 @@
(set_attr "prefix" "orig,vex,maybe_vex,maybe_vex,orig,orig,vex,orig,vex")
(set_attr "mode" "TI,TI,TI,TI,TI,TI,TI,V2SF,V2SF")])
-;; movd instead of movq is required to handle broken assemblers.
-(define_insn "*vec_concatv2di_rex64_sse"
- [(set (match_operand:V2DI 0 "register_operand" "=Y2,Yi,!Y2,Y2,x,x")
- (vec_concat:V2DI
- (match_operand:DI 1 "nonimmediate_operand" "Y2m,r ,*y ,0 ,0,0")
- (match_operand:DI 2 "vector_move_operand" " C ,C ,C ,Y2,x,m")))]
- "TARGET_64BIT && TARGET_SSE"
- "@
- movq\t{%1, %0|%0, %1}
- movd\t{%1, %0|%0, %1}
- movq2dq\t{%1, %0|%0, %1}
- punpcklqdq\t{%2, %0|%0, %2}
- movlhps\t{%2, %0|%0, %2}
- movhps\t{%2, %0|%0, %2}"
- [(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
- (set_attr "prefix_rex" "*,1,*,*,*,*")
- (set_attr "mode" "TI,TI,TI,TI,V4SF,V2SF")])
-
(define_insn "vec_concatv2di"
[(set (match_operand:V2DI 0 "register_operand" "=Y2,?Y2,Y2,x,x,x,x")
(vec_concat:V2DI
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 174566)
+++ config/i386/i386.c (working copy)
@@ -8601,33 +8601,28 @@ standard_sse_constant_opcode (rtx insn,
case 1:
switch (get_attr_mode (insn))
{
- case MODE_V4SF:
- return "%vxorps\t%0, %d0";
- case MODE_V2DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vxorps\t%0, %d0";
- else
- return "%vxorpd\t%0, %d0";
case MODE_TI:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vxorps\t%0, %d0";
- else
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "%vpxor\t%0, %d0";
- case MODE_V8SF:
- return "vxorps\t%x0, %x0, %x0";
- case MODE_V4DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vxorps\t%x0, %x0, %x0";
- else
- return "vxorpd\t%x0, %x0, %x0";
+ case MODE_V2DF:
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
+ return "%vxorpd\t%0, %d0";
+ case MODE_V4SF:
+ return "%vxorps\t%0, %d0";
+
case MODE_OI:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vxorps\t%x0, %x0, %x0";
- else
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "vpxor\t%x0, %x0, %x0";
+ case MODE_V4DF:
+ if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
+ return "vxorpd\t%x0, %x0, %x0";
+ case MODE_V8SF:
+ return "vxorps\t%x0, %x0, %x0";
+
default:
break;
}
+
case 2:
return "%vpcmpeqd\t%0, %d0";
default: