Re: [RFT PATCH, i386]: Optimize zero-extensions from mask registers

2016-08-22 Thread Kirill Yukhin
Hello Uroš,
On 05 Aug 14:22, Uros Bizjak wrote:
> Hello!
> 
> Attached patch was inspired by assembly from PR 72805 testcase.
> Currently, the compiler generates:
> 
> test:
> vpternlogd  $0xFF, %zmm0, %zmm0, %zmm0
> vpxord  %zmm1, %zmm1, %zmm1
> vpcmpd  $1, %zmm1, %zmm0, %k1
> kmovw   %k1, %eax
> movzwl  %ax, %eax
> ret
> 
> Please note that kmovw already zero-extends from a mask register.
> 
> 2016-08-05  Uros Bizjak  
> 
> * config/i386/i386.md (*zero_extendsidi2): Add (*r,*k) alternative.
> (zero_extend<mode>di2): Ditto.
> (*zero_extend<mode>si2): Ditto.
> (*zero_extendqihi2): Ditto.
> 
> Patch was bootstrapped and regression tested on x86_64-linux-gnu {,-m32}.
> 
> The patch is in RFT state, since I have no means to test AVX512 stuff.
> Kirill, can someone from Intel please test the patch?
I gave a try to your patch and see no regressions or bootstrap failures on 
i386/x86_64 (run on SDE).

--
Thanks, K
> 
> Uros.

> Index: config/i386/i386.md
> ===
> --- config/i386/i386.md   (revision 239166)
> +++ config/i386/i386.md   (working copy)
> @@ -3688,10 +3688,10 @@
>  
>  (define_insn "*zero_extendsidi2"
>[(set (match_operand:DI 0 "nonimmediate_operand"
> - "=r,?r,?o,r   ,o,?*Ym,?!*y,?r ,?r,?*Yi,?*x")
> + "=r,?r,?o,r   ,o,?*Ym,?!*y,?r ,?r,?*Yi,?*x,*r")
>   (zero_extend:DI
>(match_operand:SI 1 "x86_64_zext_operand"
> - "0 ,rm,r ,rmWz,0,r   ,m   ,*Yj,*x,r   ,m")))]
> + "0 ,rm,r ,rmWz,0,r   ,m   ,*Yj,*x,r   ,m  ,*k")))]
>""
>  {
>switch (get_attr_type (insn))
> @@ -3717,6 +3717,9 @@
>  
>return "%vmovd\t{%1, %0|%0, %1}";
>  
> +case TYPE_MSKMOV:
> +  return "kmovd\t{%1, %k0|%k0, %1}";
> +
>  default:
>gcc_unreachable ();
>  }
> @@ -3724,7 +3727,7 @@
>[(set (attr "isa")
>   (cond [(eq_attr "alternative" "0,1,2")
> (const_string "nox64")
> - (eq_attr "alternative" "3,7")
> + (eq_attr "alternative" "3,7,11")
> (const_string "x64")
>   (eq_attr "alternative" "8")
> (const_string "x64_sse4")
> @@ -3741,6 +3744,8 @@
> (const_string "ssemov")
>   (eq_attr "alternative" "8")
> (const_string "sselog1")
> + (eq_attr "alternative" "11")
> +   (const_string "mskmov")
>  ]
>  (const_string "imovx")))
> (set (attr "prefix_extra")
> @@ -3792,12 +3797,14 @@
>"split_double_mode (DImode, [0], 1, [3], [4]);")
>  
>  (define_insn "zero_extend<mode>di2"
> -  [(set (match_operand:DI 0 "register_operand" "=r")
> +  [(set (match_operand:DI 0 "register_operand" "=r,*r")
>   (zero_extend:DI
> -  (match_operand:SWI12 1 "nonimmediate_operand" "<r>m")))]
> +  (match_operand:SWI12 1 "nonimmediate_operand" "<r>m,*k")))]
>"TARGET_64BIT"
> -  "movz{<imodesuffix>l|x}\t{%1, %k0|%k0, %1}"
> -  [(set_attr "type" "imovx")
> +  "@
> +   movz{<imodesuffix>l|x}\t{%1, %k0|%k0, %1}
> +   kmov<mskmodesuffix>\t{%1, %k0|%k0, %1}"
> +  [(set_attr "type" "imovx,mskmov")
> (set_attr "mode" "SI")])
>  
>  (define_expand "zero_extend<mode>si2"
> @@ -3841,13 +3848,15 @@
> (set_attr "mode" "SI")])
>  
>  (define_insn "*zero_extend<mode>si2"
> -  [(set (match_operand:SI 0 "register_operand" "=r")
> +  [(set (match_operand:SI 0 "register_operand" "=r,*r")
>   (zero_extend:SI
> -   (match_operand:SWI12 1 "nonimmediate_operand" "<r>m")))]
> +   (match_operand:SWI12 1 "nonimmediate_operand" "<r>m,*k")))]
>"!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
> -  "movz{<imodesuffix>l|x}\t{%1, %0|%0, %1}"
> -  [(set_attr "type" "imovx")
> -   (set_attr "mode" "SI")])
> +  "@
> +   movz{<imodesuffix>l|x}\t{%1, %0|%0, %1}
> +   kmov<mskmodesuffix>\t{%1, %0|%0, %1}"
> +  [(set_attr "type" "imovx,mskmov")
> +   (set_attr "mode" "SI,<MODE>")])
>  
>  (define_expand "zero_extendqihi2"
>[(set (match_operand:HI 0 "register_operand")
> @@ -3890,12 +3899,14 @@
>  
>  ; zero extend to SImode to avoid partial register stalls
>  (define_insn "*zero_extendqihi2"
> -  [(set (match_operand:HI 0 "register_operand" "=r")
> - (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
> +  [(set (match_operand:HI 0 "register_operand" "=r,*r")
> + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm,*k")))]
>"!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
> -  "movz{bl|x}\t{%1, %k0|%k0, %1}"
> -  [(set_attr "type" "imovx")
> -   (set_attr "mode" "SI")])
> +  "@
> +   movz{bl|x}\t{%1, %k0|%k0, %1}
> +   kmovb\t{%1, %k0|%k0, %1}"
> +  [(set_attr "type" "imovx,mskmov")
> +   (set_attr "mode" "SI,QI")])
>  
>  (define_insn_and_split "*zext<mode>_doubleword_and"
>    [(set (match_operand:DI 0 "register_operand" "=&<r>")



[RFT PATCH, i386]: Optimize zero-extensions from mask registers

2016-08-05 Thread Uros Bizjak
Hello!

Attached patch was inspired by assembly from PR 72805 testcase.
Currently, the compiler generates:

test:
vpternlogd  $0xFF, %zmm0, %zmm0, %zmm0
vpxord  %zmm1, %zmm1, %zmm1
vpcmpd  $1, %zmm1, %zmm0, %k1
kmovw   %k1, %eax
movzwl  %ax, %eax
ret

Please note that kmovw already zero-extends from a mask register.

Attached patch allows ree pass to propagate mask registers to zext
insn patterns, resulting in:

test:
vpternlogd  $0xFF, %zmm0, %zmm0, %zmm0  # 24
movv16si_internal/2 [length = 6]
vpxord  %zmm1, %zmm1, %zmm1 # 25movv16si_internal/1
 [length = 6]
vpcmpd  $1, %zmm1, %zmm0, %k1   # 13avx512f_cmpv16si3
 [length = 7]
kmovw   %k1, %eax   # 27*zero_extendhisi2/2 [length = 4]
ret # 30simple_return_internal  [length = 1]

2016-08-05  Uros Bizjak  

* config/i386/i386.md (*zero_extendsidi2): Add (*r,*k) alternative.
(zero_extend<mode>di2): Ditto.
(*zero_extend<mode>si2): Ditto.
(*zero_extendqihi2): Ditto.

Patch was bootstrapped and regression tested on x86_64-linux-gnu {,-m32}.

The patch is in RFT state, since I have no means to test AVX512 stuff.
Kirill, can someone from Intel please test the patch?

Uros.
Index: config/i386/i386.md
===
--- config/i386/i386.md (revision 239166)
+++ config/i386/i386.md (working copy)
@@ -3688,10 +3688,10 @@
 
 (define_insn "*zero_extendsidi2"
   [(set (match_operand:DI 0 "nonimmediate_operand"
-   "=r,?r,?o,r   ,o,?*Ym,?!*y,?r ,?r,?*Yi,?*x")
+   "=r,?r,?o,r   ,o,?*Ym,?!*y,?r ,?r,?*Yi,?*x,*r")
(zero_extend:DI
 (match_operand:SI 1 "x86_64_zext_operand"
-   "0 ,rm,r ,rmWz,0,r   ,m   ,*Yj,*x,r   ,m")))]
+   "0 ,rm,r ,rmWz,0,r   ,m   ,*Yj,*x,r   ,m  ,*k")))]
   ""
 {
   switch (get_attr_type (insn))
@@ -3717,6 +3717,9 @@
 
   return "%vmovd\t{%1, %0|%0, %1}";
 
+case TYPE_MSKMOV:
+  return "kmovd\t{%1, %k0|%k0, %1}";
+
 default:
   gcc_unreachable ();
 }
@@ -3724,7 +3727,7 @@
   [(set (attr "isa")
  (cond [(eq_attr "alternative" "0,1,2")
  (const_string "nox64")
-   (eq_attr "alternative" "3,7")
+   (eq_attr "alternative" "3,7,11")
  (const_string "x64")
(eq_attr "alternative" "8")
  (const_string "x64_sse4")
@@ -3741,6 +3744,8 @@
  (const_string "ssemov")
(eq_attr "alternative" "8")
  (const_string "sselog1")
+   (eq_attr "alternative" "11")
+ (const_string "mskmov")
   ]
   (const_string "imovx")))
(set (attr "prefix_extra")
@@ -3792,12 +3797,14 @@
   "split_double_mode (DImode, [0], 1, [3], [4]);")
 
 (define_insn "zero_extend<mode>di2"
-  [(set (match_operand:DI 0 "register_operand" "=r")
+  [(set (match_operand:DI 0 "register_operand" "=r,*r")
(zero_extend:DI
-(match_operand:SWI12 1 "nonimmediate_operand" "<r>m")))]
+(match_operand:SWI12 1 "nonimmediate_operand" "<r>m,*k")))]
   "TARGET_64BIT"
-  "movz{<imodesuffix>l|x}\t{%1, %k0|%k0, %1}"
-  [(set_attr "type" "imovx")
+  "@
+   movz{<imodesuffix>l|x}\t{%1, %k0|%k0, %1}
+   kmov<mskmodesuffix>\t{%1, %k0|%k0, %1}"
+  [(set_attr "type" "imovx,mskmov")
(set_attr "mode" "SI")])
 
 (define_expand "zero_extend<mode>si2"
@@ -3841,13 +3848,15 @@
(set_attr "mode" "SI")])
 
 (define_insn "*zero_extend<mode>si2"
-  [(set (match_operand:SI 0 "register_operand" "=r")
+  [(set (match_operand:SI 0 "register_operand" "=r,*r")
(zero_extend:SI
- (match_operand:SWI12 1 "nonimmediate_operand" "<r>m")))]
+ (match_operand:SWI12 1 "nonimmediate_operand" "<r>m,*k")))]
   "!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
-  "movz{<imodesuffix>l|x}\t{%1, %0|%0, %1}"
-  [(set_attr "type" "imovx")
-   (set_attr "mode" "SI")])
+  "@
+   movz{<imodesuffix>l|x}\t{%1, %0|%0, %1}
+   kmov<mskmodesuffix>\t{%1, %0|%0, %1}"
+  [(set_attr "type" "imovx,mskmov")
+   (set_attr "mode" "SI,<MODE>")])
 
 (define_expand "zero_extendqihi2"
   [(set (match_operand:HI 0 "register_operand")
@@ -3890,12 +3899,14 @@
 
 ; zero extend to SImode to avoid partial register stalls
 (define_insn "*zero_extendqihi2"
-  [(set (match_operand:HI 0 "register_operand" "=r")
-   (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+  [(set (match_operand:HI 0 "register_operand" "=r,*r")
+   (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm,*k")))]
   "!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
-  "movz{bl|x}\t{%1, %k0|%k0, %1}"
-  [(set_attr "type" "imovx")
-   (set_attr "mode" "SI")])
+  "@
+   movz{bl|x}\t{%1, %k0|%k0, %1}
+   kmovb\t{%1, %k0|%k0, %1}"
+  [(set_attr "type" "imovx,mskmov")
+   (set_attr "mode" "SI,QI")])
 
 (define_insn_and_split "*zext<mode>_doubleword_and"
   [(set (match_operand:DI 0 "register_operand" "=&<r>")