https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99407

            Bug ID: 99407
           Summary: s243 benchmark of TSVC is vectorized by clang and not
                    by gcc
           Product: gcc
           Version: 11.0
            Status: UNCONFIRMED
          Severity: normal
          Priority: P3
         Component: middle-end
          Assignee: unassigned at gcc dot gnu.org
          Reporter: hubicka at gcc dot gnu.org
  Target Milestone: ---

This testcase (from TSVC) runs about 4 times faster on zen3 when built with
clang than when built with gcc.

typedef float real_t;

#define iterations 100000
#define LEN_1D 32000
#define LEN_2D 256
// array definitions
real_t flat_2d_array[LEN_2D*LEN_2D];

real_t x[LEN_1D];

real_t a[LEN_1D],b[LEN_1D],c[LEN_1D],d[LEN_1D],e[LEN_1D],
bb[LEN_2D][LEN_2D],cc[LEN_2D][LEN_2D],tt[LEN_2D][LEN_2D];

int indx[LEN_1D];

real_t* __restrict__ xx;
real_t* yy;
real_t s243(void)
{

//    node splitting
//    false dependence cycle breaking

    for (int nl = 0; nl < iterations; nl++) {
        for (int i = 0; i < LEN_1D-1; i++) {
            a[i] = b[i] + c[i  ] * d[i];
            b[i] = a[i] + d[i  ] * e[i];
            a[i] = b[i] + a[i+1] * d[i];
        }
    }
    return a[0]; /* avoid falling off the end of a non-void function */
}
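
The "node splitting" / "false dependence cycle breaking" comments refer to the
transformation that makes this loop vectorizable: a[i+1] is never written in
iteration i, so the read can be split off into a temporary loaded before the
stores to a[i], breaking the anti-dependence cycle on a.  A rough source-level
sketch of the idea (my illustration, not part of the testcase; a_next is just
a local name):

    for (int nl = 0; nl < iterations; nl++) {
        for (int i = 0; i < LEN_1D-1; i++) {
            real_t a_next = a[i+1];   // still the old value; a[i+1] is not written in iteration i
            a[i] = b[i] + c[i] * d[i];
            b[i] = a[i] + d[i] * e[i];
            a[i] = b[i] + a_next * d[i];
        }
    }

With the read of a[i+1] hoisted, the three statements form a straight chain
with no loop-carried cycle, which is essentially what clang does below: the
vector loads from a+4(%rcx)/a+36(%rcx) are issued before the stores to
a(%rcx)/a+32(%rcx).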

The inner loop generated by clang is:
.LBB0_2:                                #   Parent Loop BB0_1 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
        vmovups c(%rcx), %ymm12
        vmovups c+32(%rcx), %ymm14
        vmovups d(%rcx), %ymm0
        vmovups d+32(%rcx), %ymm7
        vfmadd213ps     b(%rcx), %ymm0, %ymm12  # ymm12 = (ymm0 * ymm12) + mem
        vfmadd213ps     b+32(%rcx), %ymm7, %ymm14 # ymm14 = (ymm7 * ymm14) + mem
        vfmadd231ps     e(%rcx), %ymm0, %ymm12  # ymm12 = (ymm0 * mem) + ymm12
        vfmadd231ps     e+32(%rcx), %ymm7, %ymm14 # ymm14 = (ymm7 * mem) + ymm14
        vmovups %ymm12, b(%rcx)
        vmovups %ymm14, b+32(%rcx)
        vfmadd231ps     a+4(%rcx), %ymm0, %ymm12 # ymm12 = (ymm0 * mem) + ymm12
        vfmadd231ps     a+36(%rcx), %ymm7, %ymm14 # ymm14 = (ymm7 * mem) + ymm14
        vmovups %ymm12, a(%rcx)
        vmovups %ymm14, a+32(%rcx)
        addq    $64, %rcx
        cmpq    $127936, %rcx                   # imm = 0x1F3C0
        jne     .LBB0_2
