Re: [PATCH v3 34/48] tcg/optimize: Split out fold_to_not

2021-10-25 Thread Richard Henderson

On 10/25/21 11:13 AM, Luis Fernando Fujita Pires wrote:

> From: Richard Henderson 
> 
> > >>  static bool fold_eqv(OptContext *ctx, TCGOp *op)
> > >>  {
> > >> -    return fold_const2(ctx, op);
> > >> +    if (fold_const2(ctx, op) ||
> > >> +        fold_xi_to_not(ctx, op, 0)) {
> > >
> > > Should be fold_ix_to_not (not fold_xi_to_not).
> > 
> > No, because for eqv we expect the second operand to be the constant -- eqv
> > is commutative.
> 
> Ah, got it! The previous code was wrong, and I failed to notice that eqv
> would've had its arguments swapped to have the constant as the second operand.


Ah!  I failed to notice that the previous code was wrong.  ;-)


r~
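
(For readers following the archive: the argument swap Luis mentions happens
before the per-opcode fold hooks run -- QEMU canonicalizes commutative
operations in swap_commutative() in tcg/optimize.c so that a constant operand
lands in the second slot.  The fragment below is a simplified stand-in for
that step, not the actual TCG code.)

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for a TCG operand: either a constant or a temp. */
typedef struct {
    bool is_const;
    uint64_t val;
} Arg;

/*
 * Move a constant first operand into the second slot.  After this runs,
 * eqv(0, x) has become eqv(x, 0), so fold_xi_to_not (which tests args[2])
 * covers both operand orders for commutative ops; only non-commutative
 * ops such as andc and orc need the fold_ix_to_not variant.
 */
static void canonicalize_commutative(Arg *a1, Arg *a2)
{
    if (a1->is_const && !a2->is_const) {
        Arg tmp = *a1;
        *a1 = *a2;
        *a2 = tmp;
    }
}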



RE: [PATCH v3 34/48] tcg/optimize: Split out fold_to_not

2021-10-25 Thread Luis Fernando Fujita Pires
From: Richard Henderson 

> >>  static bool fold_eqv(OptContext *ctx, TCGOp *op)
> >>  {
> >> -    return fold_const2(ctx, op);
> >> +    if (fold_const2(ctx, op) ||
> >> +        fold_xi_to_not(ctx, op, 0)) {
> >
> > Should be fold_ix_to_not (not fold_xi_to_not).
> 
> No, because for eqv we expect the second operand to be the constant -- eqv is
> commutative.

Ah, got it! The previous code was wrong, and I failed to notice that eqv 
would've had its arguments swapped to have the constant as the second operand.

--
Luis Pires
Instituto de Pesquisas ELDORADO


Re: [PATCH v3 34/48] tcg/optimize: Split out fold_to_not

2021-10-25 Thread Richard Henderson

On 10/25/21 7:17 AM, Luis Fernando Fujita Pires wrote:

> From: Richard Henderson 
> 
>> Split out the conditional conversion from a more complex logical operation to a
>> simple NOT.  Create a couple more helpers to make this easy for the outer-most
>> logical operations.
>>
>> Signed-off-by: Richard Henderson 
>> ---
>>  tcg/optimize.c | 154 +++--
>>  1 file changed, 86 insertions(+), 68 deletions(-)
>>
>>  static bool fold_eqv(OptContext *ctx, TCGOp *op)
>>  {
>> -    return fold_const2(ctx, op);
>> +    if (fold_const2(ctx, op) ||
>> +        fold_xi_to_not(ctx, op, 0)) {
>
> Should be fold_ix_to_not (not fold_xi_to_not).


No, because for eqv we expect the second operand to be the constant -- eqv is 
commutative.




>>  static bool fold_orc(OptContext *ctx, TCGOp *op)
>>  {
>> -    return fold_const2(ctx, op);
>> +    if (fold_const2(ctx, op) ||
>> +        fold_xi_to_not(ctx, op, 0)) {


But for orc you are correct.  Thanks.


r~
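
(The algebra behind both answers is easy to check with plain integers.  In
the sketch below, eqv and orc are local helpers matching the TCG semantics,
eqv(x,y) = ~(x ^ y) and orc(x,y) = x | ~y; this is an illustration, not code
from the series.)

#include <assert.h>
#include <stdint.h>

static uint64_t eqv(uint64_t x, uint64_t y) { return ~(x ^ y); }
static uint64_t orc(uint64_t x, uint64_t y) { return x | ~y; }

int main(void)
{
    uint64_t x = 0x123456789abcdef0ull;

    /* eqv is commutative: a constant 0 in either slot yields NOT. */
    assert(eqv(x, 0) == ~x);
    assert(eqv(0, x) == ~x);

    /* orc is not: only a constant 0 in the first slot yields NOT. */
    assert(orc(0, x) == ~x);
    assert(orc(x, 0) == ~(uint64_t)0);   /* folds to all-ones, not NOT */

    return 0;
}

So fold_xi_to_not(ctx, op, 0) is right for eqv once commutative operands are
canonicalized, while orc indeed wants the fold_ix_to_not form, as the thread
concludes.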



RE: [PATCH v3 34/48] tcg/optimize: Split out fold_to_not

2021-10-25 Thread Luis Fernando Fujita Pires
From: Richard Henderson 
> Split out the conditional conversion from a more complex logical operation
> to a simple NOT.  Create a couple more helpers to make this easy for the
> outer-most logical operations.
> 
> Signed-off-by: Richard Henderson 
> ---
>  tcg/optimize.c | 154 +++--
>  1 file changed, 86 insertions(+), 68 deletions(-)

>  static bool fold_eqv(OptContext *ctx, TCGOp *op)
>  {
> -    return fold_const2(ctx, op);
> +    if (fold_const2(ctx, op) ||
> +        fold_xi_to_not(ctx, op, 0)) {

Should be fold_ix_to_not (not fold_xi_to_not).

>  static bool fold_orc(OptContext *ctx, TCGOp *op)
>  {
> -    return fold_const2(ctx, op);
> +    if (fold_const2(ctx, op) ||
> +        fold_xi_to_not(ctx, op, 0)) {

Same here.

--
Luis Pires
Instituto de Pesquisas ELDORADO



[PATCH v3 34/48] tcg/optimize: Split out fold_to_not

2021-10-21 Thread Richard Henderson
Split out the conditional conversion from a more complex logical
operation to a simple NOT.  Create a couple more helpers to make
this easy for the outer-most logical operations.

Signed-off-by: Richard Henderson 
---
 tcg/optimize.c | 154 +++--
 1 file changed, 86 insertions(+), 68 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index c8b6afc745..71b4c3edb4 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -694,6 +694,52 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -703,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }
 
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -781,7 +836,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -982,7 +1038,11 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1120,7 +1180,11 @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1130,12 +1194,22 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1149,7 +1223,11 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1280,7 +1358,8 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -1434,67 +1513,6 @@ void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-
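
(As a compact reference, the identities the new helpers exploit in the hunks
above can each be verified in isolation.  The nand, nor and andc functions
below are plain-integer stand-ins for the TCG opcodes, written for this
summary rather than taken from the patch.)

#include <assert.h>
#include <stdint.h>

static uint64_t nand(uint64_t x, uint64_t y) { return ~(x & y); }
static uint64_t nor(uint64_t x, uint64_t y)  { return ~(x | y); }
static uint64_t andc(uint64_t x, uint64_t y) { return x & ~y; }

int main(void)
{
    uint64_t x = 0xfedcba9876543210ull;
    uint64_t m1 = ~(uint64_t)0;        /* the -1 used by the folds */

    assert(nand(x, m1) == ~x);         /* fold_xi_to_not(ctx, op, -1) */
    assert(nor(x, 0) == ~x);           /* fold_xi_to_not(ctx, op, 0)  */
    assert((x ^ m1) == ~x);            /* xor: fold_xi_to_not(ctx, op, -1) */
    assert(andc(m1, x) == ~x);         /* andc: fold_ix_to_not(ctx, op, -1) */

    return 0;
}

The one subtlety is fold_not itself: fold_to_not rewrites the opcode in place
and re-enters fold_not, so fold_not must report the op as fully processed,
hence its unconditional return of true via finish_folding().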