This patch should not change anything functional; it just deletes some cruft
and converts the constraints to the new compact syntax (sketched briefly
below) to make the patterns easier to edit later.

 * The long-commented-out insns have been deleted.
 * The vestigial support for unaligned register pairs has been removed
   (the feature was disabled and removed elsewhere long ago).
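
For anyone who has not met the compact constraint syntax yet, here is a
minimal, generic sketch of what the conversion looks like.  The insn,
constraints, mnemonics and attribute values below are invented purely for
illustration; they are not taken from the GCN port or from this patch.

  ;; Old syntax (illustrative only): constraints and attributes live in
  ;; separate per-alternative strings, so each alternative is scattered
  ;; across the pattern.
  (define_insn "*example_add"
    [(set (match_operand:SI 0 "register_operand"  "=r,r")
          (plus:SI (match_operand:SI 1 "register_operand"  "r,r")
                   (match_operand:SI 2 "nonmemory_operand" "r,I")))]
    ""
    "@
     add\t%0, %1, %2
     addi\t%0, %1, %2"
    [(set_attr "length" "4,8")])

  ;; New compact syntax (illustrative only): each alternative's
  ;; constraints, attributes and output template sit on one line.
  (define_insn "*example_add"
    [(set (match_operand:SI 0 "register_operand")
          (plus:SI (match_operand:SI 1 "register_operand")
                   (match_operand:SI 2 "nonmemory_operand")))]
    ""
    {@ [cons: =0, 1, 2; attrs: length]
    [r,r,r;4] add\t%0, %1, %2
    [r,r,I;8] addi\t%0, %1, %2
    })

Keeping each alternative on a single line is what makes later edits (adding,
removing or reordering alternatives) much less error-prone.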

gcc/ChangeLog:

        * config/gcn/gcn-valu.md (*mov<mode>_exec_match): Delete.
        (*mov<mode>_exec_match): Likewise.
        (*mov<mode>): Delete unaligned register support, and convert to new
        constraint syntax.
        (mov<mode>_exec): Likewise.
        (@mov<mode>_sgprbase): Likewise.
        * config/gcn/gcn.md (*movbi): Likewise.
---
 gcc/config/gcn/gcn-valu.md | 204 ++++++++-----------------------------
 gcc/config/gcn/gcn.md      |  76 ++++----------
 2 files changed, 64 insertions(+), 216 deletions(-)

diff --git a/gcc/config/gcn/gcn-valu.md b/gcc/config/gcn/gcn-valu.md
index 625191b3d13..9d752c717ff 100644
--- a/gcc/config/gcn/gcn-valu.md
+++ b/gcc/config/gcn/gcn-valu.md
@@ -477,89 +477,33 @@ (define_insn "mov<mode>_exec"
   [m,v ,U0,e ,&v;*    ,16] #
   })
 
-; This variant does not accept an unspec, but does permit MEM
-; read/modify/write which is necessary for maskstore.
-
-;(define_insn "*mov<mode>_exec_match"
-;  [(set (match_operand:V_1REG 0 "nonimmediate_operand" "=v,v, v, m")
-;      (vec_merge:V_1REG
-;        (match_operand:V_1REG 1 "general_operand"     "vA,B, m, v")
-;        (match_dup 0)
-;        (match_operand:DI 2 "gcn_exec_reg_operand"    " e,e, e, e")))
-;   (clobber (match_scratch:<VnDI> 3                   "=X,X,&v,&v"))]
-;  "!MEM_P (operands[0]) || REG_P (operands[1])"
-;  "@
-;  v_mov_b32\t%0, %1
-;  v_mov_b32\t%0, %1
-;  #
-;  #"
-;  [(set_attr "type" "vop1,vop1,*,*")
-;   (set_attr "length" "4,8,16,16")])
-
 (define_insn "*mov<mode>"
-  [(set (match_operand:V_2REG 0 "nonimmediate_operand" "=v, v,$a,a")
-       (match_operand:V_2REG 1 "general_operand"      "vDB,a, v,a"))]
-  ""
-  "@
-   * if (!REG_P (operands[1]) || REGNO (operands[0]) <= REGNO (operands[1])) \
-       return \"v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\"; \
-     else \
-       return \"v_mov_b32\t%H0, %H1\;v_mov_b32\t%L0, %L1\";
-   * if (REGNO (operands[0]) <= REGNO (operands[1])) \
-       return \"v_accvgpr_read_b32\t%L0, %L1\;v_accvgpr_read_b32\t%H0, %H1\"; \
-     else \
-       return \"v_accvgpr_read_b32\t%H0, %H1\;v_accvgpr_read_b32\t%L0, %L1\";
-   * if (REGNO (operands[0]) <= REGNO (operands[1])) \
-       return \"v_accvgpr_write_b32\t%L0, %L1\;v_accvgpr_write_b32\t%H0, 
%H1\"; \
-     else \
-       return \"v_accvgpr_write_b32\t%H0, %H1\;v_accvgpr_write_b32\t%L0, %L1\";
-   * if (REGNO (operands[0]) <= REGNO (operands[1])) \
-       return \"v_accvgpr_mov_b32\t%L0, %L1\;v_accvgpr_mov_b32\t%H0, %H1\"; \
-     else \
-       return \"v_accvgpr_mov_b32\t%H0, %H1\;v_accvgpr_mov_b32\t%L0, %L1\";"
-  [(set_attr "type" "vmult,vmult,vmult,vmult")
-   (set_attr "length" "16,16,16,8")
-   (set_attr "cdna" "*,*,*,cdna2")])
+  [(set (match_operand:V_2REG 0 "nonimmediate_operand")
+       (match_operand:V_2REG 1 "general_operand"))]
+  ""
+  {@ [cons: =0, 1; attrs: length, cdna]
+  [v ,vDB;16,*    ] v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1
+  [v ,a  ;16,*    ] v_accvgpr_read_b32\t%L0, %L1\;v_accvgpr_read_b32\t%H0, %H1
+  [$a,v  ;16,*    ] v_accvgpr_write_b32\t%L0, %L1\;v_accvgpr_write_b32\t%H0, %H1
+  [a ,a  ;8 ,cdna2] v_accvgpr_mov_b32\t%L0, %L1\;v_accvgpr_mov_b32\t%H0, %H1
+  }
+  [(set_attr "type" "vmult,vmult,vmult,vmult")])
 
 (define_insn "mov<mode>_exec"
-  [(set (match_operand:V_2REG 0 "nonimmediate_operand" "= v,   v,   v, v, m")
+  [(set (match_operand:V_2REG 0 "nonimmediate_operand")
        (vec_merge:V_2REG
-         (match_operand:V_2REG 1 "general_operand"    "vDB,  v0,  v0, m, v")
-         (match_operand:V_2REG 2 "gcn_alu_or_unspec_operand"
-                                                      " U0,vDA0,vDA0,U0,U0")
-         (match_operand:DI 3 "register_operand"       "  e,  cV,  Sv, e, e")))
-   (clobber (match_scratch:<VnDI> 4                   "= X,   X,   X,&v,&v"))]
+         (match_operand:V_2REG 1 "general_operand")
+         (match_operand:V_2REG 2 "gcn_alu_or_unspec_operand")
+         (match_operand:DI 3 "register_operand")))
+   (clobber (match_scratch:<VnDI> 4))]
   "!MEM_P (operands[0]) || REG_P (operands[1])"
-  {
-    if (!REG_P (operands[1]) || REGNO (operands[0]) <= REGNO (operands[1]))
-      switch (which_alternative)
-       {
-       case 0:
-         return "v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1";
-       case 1:
-         return "v_cndmask_b32\t%L0, %L2, %L1, vcc\;"
-                "v_cndmask_b32\t%H0, %H2, %H1, vcc";
-       case 2:
-         return "v_cndmask_b32\t%L0, %L2, %L1, %3\;"
-                "v_cndmask_b32\t%H0, %H2, %H1, %3";
-       }
-    else
-      switch (which_alternative)
-       {
-       case 0:
-         return "v_mov_b32\t%H0, %H1\;v_mov_b32\t%L0, %L1";
-       case 1:
-         return "v_cndmask_b32\t%H0, %H2, %H1, vcc\;"
-                "v_cndmask_b32\t%L0, %L2, %L1, vcc";
-       case 2:
-         return "v_cndmask_b32\t%H0, %H2, %H1, %3\;"
-                "v_cndmask_b32\t%L0, %L2, %L1, %3";
-       }
-
-    return "#";
-  }
-  [(set_attr "type" "vmult,vmult,vmult,*,*")
-   (set_attr "length" "16,16,16,16,16")])
+  {@ [cons: =0, 1, 2, 3, =4; attrs: type, length]
+  [v,vDB,U0  ,e ,X ;vmult,16] v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1
+  [v,v0 ,vDA0,cV,X ;vmult,16] v_cndmask_b32\t%L0, %L2, %L1, vcc\;v_cndmask_b32\t%H0, %H2, %H1, vcc
+  [v,v0 ,vDA0,Sv,X ;vmult,16] v_cndmask_b32\t%L0, %L2, %L1, %3\;v_cndmask_b32\t%H0, %H2, %H1, %3
+  [v,m  ,U0  ,e ,&v;*    ,16] #
+  [m,v  ,U0  ,e ,&v;*    ,16] #
+  })
 
 (define_insn "*mov<mode>_4reg"
   [(set (match_operand:V_4REG 0 "nonimmediate_operand")
@@ -573,75 +517,20 @@ (define_insn "*mov<mode>_4reg"
   })
 
 (define_insn "mov<mode>_exec"
-  [(set (match_operand:V_4REG 0 "nonimmediate_operand" "= v,   v,   v, v, m")
+  [(set (match_operand:V_4REG 0 "nonimmediate_operand")
        (vec_merge:V_4REG
-         (match_operand:V_4REG 1 "general_operand"    "vDB,  v0,  v0, m, v")
-         (match_operand:V_4REG 2 "gcn_alu_or_unspec_operand"
-                                                      " U0,vDA0,vDA0,U0,U0")
-         (match_operand:DI 3 "register_operand"       "  e,  cV,  Sv, e, e")))
-   (clobber (match_scratch:<VnDI> 4                   "= X,   X,   X,&v,&v"))]
+         (match_operand:V_4REG 1 "general_operand")
+         (match_operand:V_4REG 2 "gcn_alu_or_unspec_operand")
+         (match_operand:DI 3 "register_operand")))
+   (clobber (match_scratch:<VnDI> 4))]
   "!MEM_P (operands[0]) || REG_P (operands[1])"
-  {
-    if (!REG_P (operands[1]) || REGNO (operands[0]) <= REGNO (operands[1]))
-      switch (which_alternative)
-       {
-       case 0:
-         return "v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\;"
-                 "v_mov_b32\t%J0, %J1\;v_mov_b32\t%K0, %K1";
-       case 1:
-         return "v_cndmask_b32\t%L0, %L2, %L1, vcc\;"
-                "v_cndmask_b32\t%H0, %H2, %H1, vcc\;"
-                "v_cndmask_b32\t%J0, %J2, %J1, vcc\;"
-                "v_cndmask_b32\t%K0, %K2, %K1, vcc";
-       case 2:
-         return "v_cndmask_b32\t%L0, %L2, %L1, %3\;"
-                "v_cndmask_b32\t%H0, %H2, %H1, %3\;"
-                "v_cndmask_b32\t%J0, %J2, %J1, %3\;"
-                "v_cndmask_b32\t%K0, %K2, %K1, %3";
-       }
-    else
-      switch (which_alternative)
-       {
-       case 0:
-         return "v_mov_b32\t%H0, %H1\;v_mov_b32\t%L0, %L1\;"
-                 "v_mov_b32\t%J0, %J1\;v_mov_b32\t%K0, %K1";
-       case 1:
-         return "v_cndmask_b32\t%H0, %H2, %H1, vcc\;"
-                "v_cndmask_b32\t%L0, %L2, %L1, vcc\;"
-                "v_cndmask_b32\t%J0, %J2, %J1, vcc\;"
-                "v_cndmask_b32\t%K0, %K2, %K1, vcc";
-       case 2:
-         return "v_cndmask_b32\t%H0, %H2, %H1, %3\;"
-                "v_cndmask_b32\t%L0, %L2, %L1, %3\;"
-                "v_cndmask_b32\t%J0, %J2, %J1, %3\;"
-                "v_cndmask_b32\t%K0, %K2, %K1, %3";
-       }
-
-    return "#";
-  }
-  [(set_attr "type" "vmult,vmult,vmult,*,*")
-   (set_attr "length" "32")])
-
-; This variant does not accept an unspec, but does permit MEM
-; read/modify/write which is necessary for maskstore.
-
-;(define_insn "*mov<mode>_exec_match"
-;  [(set (match_operand:V_2REG 0 "nonimmediate_operand" "=v, v, m")
-;      (vec_merge:V_2REG
-;        (match_operand:V_2REG 1 "general_operand"     "vDB, m, v")
-;        (match_dup 0)
-;        (match_operand:DI 2 "gcn_exec_reg_operand"    " e, e, e")))
-;   (clobber (match_scratch:<VnDI> 3                   "=X,&v,&v"))]
-;  "!MEM_P (operands[0]) || REG_P (operands[1])"
-;  "@
-;   * if (!REG_P (operands[1]) || REGNO (operands[0]) <= REGNO (operands[1])) \
-;       return \"v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\"; \
-;     else \
-;       return \"v_mov_b32\t%H0, %H1\;v_mov_b32\t%L0, %L1\";
-;   #
-;   #"
-;  [(set_attr "type" "vmult,*,*")
-;   (set_attr "length" "16,16,16")])
+  {@ [cons: =0, 1, 2, 3, =4; attrs: type, length]
+  [v,vDB,U0  ,e ,X ;vmult,32] v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\;v_mov_b32\t%J0, %J1\;v_mov_b32\t%K0, %K1
+  [v,v0 ,vDA0,cV,X ;vmult,32] v_cndmask_b32\t%L0, %L2, %L1, vcc\;v_cndmask_b32\t%H0, %H2, %H1, vcc\;v_cndmask_b32\t%J0, %J2, %J1, vcc\;v_cndmask_b32\t%K0, %K2, %K1, vcc
+  [v,v0 ,vDA0,Sv,X ;vmult,32] v_cndmask_b32\t%L0, %L2, %L1, %3\;v_cndmask_b32\t%H0, %H2, %H1, %3\;v_cndmask_b32\t%J0, %J2, %J1, %3\;v_cndmask_b32\t%K0, %K2, %K1, %3
+  [v,m  ,U0  ,e ,&v;*    ,32] #
+  [m,v  ,U0  ,e ,&v;*    ,32] #
+  })
 
 ; A SGPR-base load looks like:
 ;   <load> v, Sv
@@ -672,24 +561,19 @@ (define_insn "@mov<mode>_sgprbase"
   })
 
 (define_insn "@mov<mode>_sgprbase"
-  [(set (match_operand:V_2REG 0 "nonimmediate_operand" "= v, v, m, a, m")
+  [(set (match_operand:V_2REG 0 "nonimmediate_operand")
        (unspec:V_2REG
-         [(match_operand:V_2REG 1 "general_operand"   "vDB, m, v, m, a")]
+         [(match_operand:V_2REG 1 "general_operand")]
          UNSPEC_SGPRBASE))
-   (clobber (match_operand:<VnDI> 2 "register_operand"  "=&v,&v,&v,&v,&v"))]
+   (clobber (match_operand:<VnDI> 2 "register_operand"))]
   "lra_in_progress || reload_completed"
-  "@
-   * if (!REG_P (operands[1]) || REGNO (operands[0]) <= REGNO (operands[1])) \
-       return \"v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\"; \
-     else \
-       return \"v_mov_b32\t%H0, %H1\;v_mov_b32\t%L0, %L1\";
-   #
-   #
-   #
-   #"
-  [(set_attr "type" "vmult,*,*,*,*")
-   (set_attr "length" "8,12,12,12,12")
-   (set_attr "cdna" "*,*,*,cdna2,cdna2")])
+  {@ [cons: =0, 1, =2; attrs: type, length, cdna]
+  [v,vDB,&v;vmult,8 ,*    ] v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1
+  [v,m  ,&v;*    ,12,*    ] #
+  [m,v  ,&v;*    ,12,*    ] #
+  [a,m  ,&v;*    ,12,cdna2] #
+  [m,a  ,&v;*    ,12,cdna2] #
+  })
 
 (define_insn "@mov<mode>_sgprbase"
   [(set (match_operand:V_4REG 0 "nonimmediate_operand")
diff --git a/gcc/config/gcn/gcn.md b/gcc/config/gcn/gcn.md
index 05a28ed8d75..5957b29f748 100644
--- a/gcc/config/gcn/gcn.md
+++ b/gcc/config/gcn/gcn.md
@@ -529,62 +529,26 @@ (define_split
 ; We need BImode move so we can reload flags registers.
 
 (define_insn "*movbi"
-  [(set (match_operand:BI 0 "nonimmediate_operand"
-                         "=Sg,   v,Sg,cs,cV,cV,Sm,&Sm,RS, v,&v,RF, v,&v,RM")
-       (match_operand:BI 1 "gcn_load_operand"
-                         "SSA,vSvA, v,SS, v,SS,RS, RS,Sm,RF,RF, v,RM,RM, v"))]
-  ""
-  {
-    /* SCC as an operand is currently not accepted by the LLVM assembler, so
-       we emit bytes directly as a workaround.  */
-    switch (which_alternative) {
-    case 0:
-      return "s_mov_b32\t%0, %1";
-    case 1:
-      if (REG_P (operands[1]) && REGNO (operands[1]) == SCC_REG)
-       return "; v_mov_b32\t%0, %1\;"
-              ".byte\t0xfd\;"
-              ".byte\t0x2\;"
-              ".byte\t((%V0<<1)&0xff)\;"
-              ".byte\t0x7e|(%V0>>7)";
-      else
-       return "v_mov_b32\t%0, %1";
-    case 2:
-      return "v_readlane_b32\t%0, %1, 0";
-    case 3:
-      return "s_cmpk_lg_u32\t%1, 0";
-    case 4:
-      return "v_cmp_ne_u32\tvcc, 0, %1";
-    case 5:
-      return "s_mov_b32\tvcc_lo, %1\;"
-            "s_mov_b32\tvcc_hi, 0";
-    case 6:
-    case 7:
-      return "s_load_dword\t%0, %A1\;s_waitcnt\tlgkmcnt(0)";
-    case 8:
-      return "s_store_dword\t%1, %A0";
-    case 9:
-    case 10:
-      return "flat_load_dword\t%0, %A1%O1%g1\;s_waitcnt\t0";
-    case 11:
-      return "flat_store_dword\t%A0, %1%O0%g0";
-    case 12:
-    case 13:
-      return "global_load_dword\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)";
-    case 14:
-      return "global_store_dword\t%A0, %1%O0%g0";
-    default:
-      gcc_unreachable ();
-    }
-  }
-  [(set_attr "type" "sop1,vop1,vop3a,sopk,vopc,mult,smem,smem,smem,flat,flat,
-                    flat,flat,flat,flat")
-   (set_attr "flatmemaccess" 
"*,*,*,*,*,*,*,*,*,load,load,store,load,load,store")
-   (set_attr "vcmp" "*,*,*,*,vcmp,*,*,*,*,*,*,*,*,*,*")
-   (set_attr "exec" "*,*,none,*,*,*,*,*,*,*,*,*,*,*,*")
-   (set_attr "length" "4,4,4,4,4,8,12,12,12,12,12,12,12,12,12")
-   (set_attr "xnack" "*,*,*,*,*,*,off,on,*,off,on,*,off,on,*")
-   (set_attr "laneselect" "*,*,read,*,*,*,*,*,*,*,*,*,*,*,*")])
+  [(set (match_operand:BI 0 "nonimmediate_operand")
+       (match_operand:BI 1 "gcn_load_operand"))]
+  "gcn_valid_move_p (BImode, operands[0], operands[1])"
+  {@ [cons: =0,1   ;attrs: type,exec,length,vcmp,xnack,laneselect,flatmemaccess]
+  [Sg ,SSA ;sop1 ,*   ,4 ,*   ,*  ,*   ,*    ] s_mov_b32\t%0 ,%1
+  [v  ,vSvA;vop1 ,*   ,4 ,*   ,*  ,*   ,*    ] v_mov_b32\t%0 ,%1
+  [Sg ,v   ;vop3a,none,4 ,*   ,*  ,read,*    ] v_readlane_b32\t%0 ,%1 ,0
+  [cs ,SS  ;sopk ,*   ,4 ,*   ,*  ,*   ,*    ] s_cmpk_lg_u32\t%1 ,0
+  [cV ,v   ;vopc ,*   ,4 ,vcmp,*  ,*   ,*    ] v_cmp_ne_u32\tvcc ,0 ,%1
+  [cV ,SS  ;mult ,*   ,8 ,*   ,*  ,*   ,*    ] s_mov_b32\tvcc_lo ,%1\;s_mov_b32\tvcc_hi ,0
+  [Sm ,RS  ;smem ,*   ,12,*   ,off,*   ,*    ] s_load_dword\t%0 ,%A1\;s_waitcnt\tlgkmcnt(0)
+  [&Sm,RS  ;smem ,*   ,12,*   ,on ,*   ,*    ] ^
+  [RS ,Sm  ;smem ,*   ,12,*   ,*  ,*   ,*    ] s_store_dword\t%1 ,%A0
+  [v  ,RF  ;flat ,*   ,12,*   ,off,*   ,load ] flat_load_dword\t%0 ,%A1%O1%g1\;s_waitcnt\t0
+  [&v ,RF  ;flat ,*   ,12,*   ,on ,*   ,load ] ^
+  [RF ,v   ;flat ,*   ,12,*   ,*  ,*   ,store] flat_store_dword\t%A0 ,%1%O0%g0
+  [v  ,RM  ;flat ,*   ,12,*   ,off,*   ,load ] global_load_dword\t%0 ,%A1%O1%g1\;s_waitcnt\tvmcnt(0)
+  [&v ,RM  ;flat ,*   ,12,*   ,on ,*   ,load ] ^
+  [RM ,v   ;flat ,*   ,12,*   ,*  ,*   ,store] global_store_dword\t%A0,%1%O0%g0
+  })
 
 ; 32bit move pattern
 
-- 
2.51.0
