On 12/23/18 10:38 PM, Mark Cave-Ayland wrote:
> -#define VEXT_SIGNED(name, element, mask, cast, recast)              \
> +#define VEXT_SIGNED(name, element, access, mask, cast, recast)      \
>  void helper_##name(ppc_avr_t *r, ppc_avr_t *b)                      \
>  {                                                                   \
>      int i;                                                          \
> -    VECTOR_FOR_INORDER_I(i, element) {                              \
> -        r->element[i] = (recast)((cast)(b->element[i] & mask));     \
> +    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
> +        r->access(i) = (recast)((cast)(b->access(i) & mask));       \
>      }                                                               \
>  }
> -VEXT_SIGNED(vextsb2w, s32, UINT8_MAX, int8_t, int32_t)
> -VEXT_SIGNED(vextsb2d, s64, UINT8_MAX, int8_t, int64_t)
> -VEXT_SIGNED(vextsh2w, s32, UINT16_MAX, int16_t, int32_t)
> -VEXT_SIGNED(vextsh2d, s64, UINT16_MAX, int16_t, int64_t)
> -VEXT_SIGNED(vextsw2d, s64, UINT32_MAX, int32_t, int64_t)
> +VEXT_SIGNED(vextsb2w, s32, VsrSW, UINT8_MAX, int8_t, int32_t)
> +VEXT_SIGNED(vextsb2d, s64, VsrSD, UINT8_MAX, int8_t, int64_t)
> +VEXT_SIGNED(vextsh2w, s32, VsrSW, UINT16_MAX, int16_t, int32_t)
> +VEXT_SIGNED(vextsh2d, s64, VsrSD, UINT16_MAX, int16_t, int64_t)
> +VEXT_SIGNED(vextsw2d, s64, VsrSD, UINT32_MAX, int32_t, int64_t)

Your conversion is technically fine, but this macro is just confused.

It does not need the mask argument, nor does it need the recast argument.
The masking is implied by the cast argument, and the recast is implied by the
assignment.

Nor, really, does it need the access argument.  The data is handled in strict
lanes, and it does not matter in which order the lanes are processed.
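Concretely, something like this (untested sketch, reusing the names from
the patch):

#define VEXT_SIGNED(name, element, cast)                            \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b)                      \
{                                                                   \
    int i;                                                          \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
        r->element[i] = (cast)b->element[i];                        \
    }                                                               \
}
VEXT_SIGNED(vextsb2w, s32, int8_t)
VEXT_SIGNED(vextsb2d, s64, int8_t)
VEXT_SIGNED(vextsh2w, s32, int16_t)
VEXT_SIGNED(vextsh2d, s64, int16_t)
VEXT_SIGNED(vextsw2d, s64, int32_t)

The cast to the narrow signed type truncates (which is the masking), and
the assignment back into the wider signed element sign-extends (which is
the recast).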

> -#define VNEG(name, element)                                         \
> +#define VNEG(name, element, access)                                 \
>  void helper_##name(ppc_avr_t *r, ppc_avr_t *b)                      \
>  {                                                                   \
>      int i;                                                          \
> -    VECTOR_FOR_INORDER_I(i, element) {                              \
> -        r->element[i] = -b->element[i];                             \
> +    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
> +        r->access(i) = -b->access(i);                               \
>      }                                                               \
>  }
> -VNEG(vnegw, s32)
> -VNEG(vnegd, s64)
> +VNEG(vnegw, s32, VsrSW)
> +VNEG(vnegd, s64, VsrSD)

Similarly, this requires neither the access argument nor in-order processing.
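I.e. (again untested):

#define VNEG(name, element)                                         \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b)                      \
{                                                                   \
    int i;                                                          \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
        r->element[i] = -b->element[i];                             \
    }                                                               \
}
VNEG(vnegw, s32)
VNEG(vnegd, s64)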

> -#define VGENERIC_DO(name, element)                                      \
> +#define VGENERIC_DO(name, element, access)                              \
>      void helper_v##name(ppc_avr_t *r, ppc_avr_t *b)                     \
>      {                                                                   \
>          int i;                                                          \
>                                                                          \
> -        VECTOR_FOR_INORDER_I(i, element) {                              \
> -            r->element[i] = name(b->element[i]);                        \
> +        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
> +            r->access(i) = name(b->access(i));                          \
>          }                                                               \
>      }

Likewise.
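Again, the plain element loop should be all that is required (untested):

#define VGENERIC_DO(name, element)                                      \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *b)                     \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = name(b->element[i]);                        \
        }                                                               \
    }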


r~
