Hello! The attached patch uses the ssememalign attribute to reject insn combinations where the vector mode memory operand would be misaligned for the combined instruction.
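For illustration, here is a minimal sketch of the situation the new check guards against (not part of the patch; the function name is made up). IIRC the movlps patterns advertise ssememalign 64, since _mm_loadl_pi only touches 64 bits of memory, so combine must not fold a memory operand with smaller known alignment into a pattern that demands more:

#include <xmmintrin.h>

/* Sketch only: p has no known alignment here.  With the patch,
   ix86_legitimate_combined_insn now also fails the combination when
   MEM_ALIGN (op) is smaller than the pattern's ssememalign, instead
   of allowing a potentially faulting unaligned access on pre-AVX
   targets.  */
__m128
load_low (__m128 x, const void *p)
{
  return _mm_loadl_pi (x, (const __m64 *) p);
}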
2015-11-12  Uros Bizjak  <ubiz...@gmail.com>

	* config/i386/i386.c (ix86_legitimate_combined_insn): Reject
	combined insn if the alignment of vector mode memory operand
	is less than ssememalign.

testsuite/ChangeLog:

2015-11-12  Uros Bizjak  <ubiz...@gmail.com>

	* gcc.target/i386/sse-1.c (swizzle): Assume that a is aligned
	to 64 bits.

Patch was bootstrapped and regression tested on x86_64-linux-gnu
{,-m32}, committed to mainline SVN.

Uros.
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c	(revision 230213)
+++ config/i386/i386.c	(working copy)
@@ -7236,11 +7236,12 @@ ix86_legitimate_combined_insn (rtx_insn *insn)
 	  /* For pre-AVX disallow unaligned loads/stores where the
 	     instructions don't support it.  */
 	  if (!TARGET_AVX
-	      && VECTOR_MODE_P (GET_MODE (op))
-	      && misaligned_operand (op, GET_MODE (op)))
+	      && VECTOR_MODE_P (mode)
+	      && misaligned_operand (op, mode))
 	    {
-	      int min_align = get_attr_ssememalign (insn);
-	      if (min_align == 0)
+	      unsigned int min_align = get_attr_ssememalign (insn);
+	      if (min_align == 0
+		  || MEM_ALIGN (op) < min_align)
 		return false;
 	    }
 
Index: testsuite/gcc.target/i386/sse-1.c
===================================================================
--- testsuite/gcc.target/i386/sse-1.c	(revision 230213)
+++ testsuite/gcc.target/i386/sse-1.c	(working copy)
@@ -14,8 +14,10 @@ typedef union
 void
 swizzle (const void *a, vector4_t * b, vector4_t * c)
 {
-  b->v = _mm_loadl_pi (b->v, (__m64 *) a);
-  c->v = _mm_loadl_pi (c->v, ((__m64 *) a) + 1);
+  __m64 *t = __builtin_assume_aligned (a, 64);
+
+  b->v = _mm_loadl_pi (b->v, t);
+  c->v = _mm_loadl_pi (c->v, t + 1);
 }
 
 /* While one legal rendering of each statement would be movaps;movlps;movaps,
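P.S.: For reference on the testsuite hunk, __builtin_assume_aligned returns its first argument unchanged and merely tells the optimizer that the pointer is aligned to at least the given number of bytes; it emits no code by itself. A minimal sketch with a made-up function:

/* Sketch only: after the builtin the compiler may assume p is
   16-byte aligned, so both loads below are known to be aligned.
   Violating the alignment promise is undefined behavior.  */
double
sum_pair (const void *p)
{
  const double *d = __builtin_assume_aligned (p, 16);
  return d[0] + d[1];
}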