https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107891
Bug ID: 107891 Summary: Redundant "double" permutation from PR97832 Product: gcc Version: 13.0 Status: UNCONFIRMED Severity: normal Priority: P3 Component: middle-end Assignee: unassigned at gcc dot gnu.org Reporter: crazylht at gmail dot com Target Milestone: --- #include <stddef.h> void foo1x1(double* restrict y, const double* restrict x, int clen) { int xi = clen & 2; double f_re = x[0+xi+0]; double f_im = x[4+xi+0]; ptrdiff_t clen2 = (clen+xi) * 2; //#pragma GCC unroll 0 for (ptrdiff_t c = 0; c < clen2; c += 8) { // y[c] = y[c] - x[c]*conj(f); //#pragma GCC unroll 4 for (ptrdiff_t k = 0; k < 4; ++k) { double x_re = x[c+0+k]; double x_im = x[c+4+k]; double y_re = y[c+0+k]; double y_im = y[c+4+k]; y_re = y_re - x_re * f_re - x_im * f_im;; y_im = y_im + x_re * f_im - x_im * f_re; y[c+0+k] = y_re; y[c+4+k] = y_im; } } } -Ofast -mavx2 -mfma generates extra blendpd compared to -O3 -mavx2 -mfma, and blendpd is redundant since there are "double" permutations for the mult operand in FMA. They're computing the same thing since we also do the same "permutation" for the invariants: f_re and f_im; can we eliminate that in the vectorizer? _232 = {f_im_36, f_im_36, f_im_36, f_im_36}; _231 = {f_im_36, f_re_35, f_re_35, f_re_35}; ------- here _216 = {f_re_35, f_re_35, f_re_35, f_re_35}; _215 = {f_re_35, f_im_36, f_im_36, f_im_36}; ------ and here. 
ivtmp.36_221 = (unsigned long) y_41(D); ivtmp.38_61 = (unsigned long) x_33(D); <bb 4> [local count: 214748368]: # ivtmp.32_66 = PHI <ivtmp.32_65(4), 0(3)> # ivtmp.36_64 = PHI <ivtmp.36_63(4), ivtmp.36_221(3)> # ivtmp.38_220 = PHI <ivtmp.38_60(4), ivtmp.38_61(3)> # DEBUG c => NULL # DEBUG k => 0 # DEBUG BEGIN_STMT # DEBUG BEGIN_STMT # DEBUG D#78 => D#79 * 8 # DEBUG D#77 => x_33(D) + D#78 _62 = (void *) ivtmp.38_220; vect_x_im_61.13_228 = MEM <const vector(4) double> [(const double *)_62]; vect_x_im_61.14_226 = MEM <const vector(4) double> [(const double *)_62 + 32B]; vect_x_re_55.15_225 = VEC_PERM_EXPR <vect_x_im_61.14_226, vect_x_im_61.13_228, { 0, 5, 6, 7 }>; ----- here. vect_x_re_55.23_209 = VEC_PERM_EXPR <vect_x_im_61.13_228, vect_x_im_61.14_226, { 0, 5, 6, 7 }>; ----- here # DEBUG D#76 => *D#77 # DEBUG x_re => D#76 # DEBUG BEGIN_STMT # DEBUG D#74 => (long unsigned int) D#75 # DEBUG D#73 => D#74 * 8 # DEBUG D#72 => x_33(D) + D#73 # DEBUG D#71 => *D#72 # DEBUG x_im => D#71 # DEBUG BEGIN_STMT # DEBUG D#70 => y_41(D) + D#78 _59 = (void *) ivtmp.36_64; vect_y_re_63.9_235 = MEM <vector(4) double> [(double *)_59]; vect_y_re_63.10_233 = MEM <vector(4) double> [(double *)_59 + 32B]; vect__42.18_219 = .FMA (vect_x_im_61.13_228, _232, vect_y_re_63.10_233); vect_y_re_69.17_222 = .FNMA (vect_x_re_55.15_225, _231, vect_y_re_63.9_235); vect_y_re_69.25_206 = .FNMA (vect_x_re_55.23_209, _215, vect_y_re_69.17_222); vect_y_re_69.25_205 = .FNMA (_216, vect_x_im_61.14_226, vect__42.18_219); and _233 = {f_im_36, f_re_35, f_re_35, f_re_35}; _217 = {f_re_35, f_im_36, f_im_36, f_im_36}; ... vect_x_re_55.15_227 = VEC_PERM_EXPR <vect_x_im_61.14_228, vect_x_im_61.13_230, { 0, 5, 6, 7 }>; vect_x_re_55.23_211 = VEC_PERM_EXPR <vect_x_im_61.13_230, vect_x_im_61.14_228, { 0, 5, 6, 7 }>; ... 
vect_y_re_69.17_224 = .FNMA (vect_x_re_55.15_227, _233, vect_y_re_63.9_237); vect_y_re_69.25_208 = .FNMA (vect_x_re_55.23_211, _217, vect_y_re_69.17_224); is equal to _233 = {f_im_36, f_im_36, f_im_36, f_im_36} _217 = {f_re_35, f_re_35, f_re_35, f_re_35}; ... vect_y_re_69.17_224 = .FNMA (vect_x_im_61.14_228, _233, vect_y_re_63.9_237) vect_y_re_69.25_208 = .FNMA (vect_x_im_61.13_230, _217, vect_y_re_69.17_224) A simplification in match.pd?