Excerpts from Richard Biener's message of March 3, 2024 11:41 am:
> 
> 
>> On 03.03.2024 at 02:51, Iain Buclaw <ibuc...@gdcproject.org> wrote:
>> 
>> Hi,
>> 
>> This patch fixes a wrong-code issue in the D front-end where lowered
>> struct comparisons would reinterpret fields with a different (usually
>> bigger) alignment than the original.  Use `build_aligned_type' to
>> preserve the original alignment when casting for such comparisons.
>> 
>> Bootstrapped and regression tested on x86_64-linux-gnu/-m32, committed
>> to mainline, and backported to releases/gcc-13, releases/gcc-12, and
>> releases/gcc-11.
> 
> LGTM.  You might want to experiment with not doing the premature
> optimization but instead using __builtin_memcmp_eq (assuming that's a
> generally good fit).  The middle-end should be better at turning that
> into target-optimal code.
> 

Indeed, just looking at the history, this lowering was introduced well
over ten years ago, so I can't comment on the original context for it
(it doesn't directly fix any old issues).
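
For illustration, the shape of the fix is roughly the following (a
minimal sketch, not the committed patch; `itype' is the integer type
used for the reinterpretation and `expr' the field operand being
compared, both illustrative names):
---
/* Carry the field's original alignment over to the integer type used
   for the comparison, so the reinterpreted access is not assumed to
   be more aligned than the underlying data.  */
tree atype = build_aligned_type (itype, TYPE_ALIGN (TREE_TYPE (expr)));
expr = build1 (VIEW_CONVERT_EXPR, atype, expr);
---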

A small comparison between the two: first the current lowering, which
reinterprets the field through a 16-byte integer, then the
__builtin_memcmp_eq form.
---
  _5 = newitem ();
  # DEBUG bn => _5
  _1 = MEM[(ucent *)_5 + 8B];
  _2 = _1 != 0;
  _6 = (int) _2;
  return _6;
---
        call    _D8pr1141717newitemFNbNiZPSQz2S2
.LVL29:
        vmovdqu 8(%rax), %xmm0
        xorl    %eax, %eax
.LVL30:
        vptest  %xmm0, %xmm0
        setne   %al
        addq    $8, %rsp
        ret


---
  _6 = newitem ();
  # DEBUG bn => _6
  D.2335.length = 0;
  D.2335.ptr = 0B;
  _1 = &_6->label.label;
  _2 = __builtin_memcmp_eq (_1, &D.2335, 16);
  _3 = _2 != 0;
  _9 = (int) _3;
  return _9;
---
        call    _D8pr1141717newitemFNbNiZPSQz2S2
.LVL29:
        movq    $0, (%rsp)
        movq    $0, 8(%rsp)
        vmovdqu 8(%rax), %xmm0
        xorl    %eax, %eax
.LVL30:
        vpxor   (%rsp), %xmm0, %xmm0
        vptest  %xmm0, %xmm0
        setne   %al
        addq    $24, %rsp
        ret
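
So the __builtin_memcmp_eq form materializes the zero-initialized
aggregate on the stack and reloads it for the compare, while the
existing lowering tests the loaded value directly.  For reference,
forming such a call from a front-end looks roughly like this (a sketch
only; `t1', `t2', and the constant size are illustrative):
---
/* Build `__builtin_memcmp_eq (t1, t2, 16) != 0' using the standard
   GCC interfaces; the middle-end expands the builtin as it sees fit.  */
tree fn = builtin_decl_explicit (BUILT_IN_MEMCMP_EQ);
tree call = build_call_expr (fn, 3, t1, t2, size_int (16));
tree result = build2 (NE_EXPR, boolean_type_node, call,
		      integer_zero_node);
---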
