Howdy,

Marc Mutz, a colleague of mine, is stumbling over ubsan reporting
misaligned stores in sljit code (when used from QRegularExpression).

You can see some of the discussion going on here:

https://codereview.qt-project.org/#/c/165159/

I'm attaching his patch (against PCRE SVN HEAD), which makes "make
check" succeed in a ubsan build of PCRE. Without the patch, "make
check" fails with http://pastebin.com/UagaK2As . The patch still needs
to be slightly reworked, because the compiler emits some conversion
warnings (see the above link).

Any feedback is kindly appreciated :)

Cheers,
-- 
Giuseppe D'Angelo
Index: sljit/sljitConfigInternal.h
===================================================================
--- sljit/sljitConfigInternal.h (revision 1661)
+++ sljit/sljitConfigInternal.h (working copy)
@@ -214,6 +214,10 @@
 #define SLJIT_MEMMOVE(dest, src, len) memmove(dest, src, len)
 #endif
 
+#ifndef SLJIT_MEMCPY
+#define SLJIT_MEMCPY(dest, src, len) memcpy(dest, src, len)
+#endif
+
 #ifndef SLJIT_ZEROMEM
 #define SLJIT_ZEROMEM(dest, len) memset(dest, 0, len)
 #endif
@@ -409,6 +413,16 @@
 
 #endif /* !SLJIT_W */
 
+/******************************************************/
+/*    Unaligned-store functions                       */
+/******************************************************/
+
+static SLJIT_INLINE void sljit_unaligned_store_s32(void *addr, sljit_s32 value)
+{ SLJIT_MEMCPY(addr, &value, sizeof(value)); }
+
+static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
+{ SLJIT_MEMCPY(addr, &value, sizeof(value)); }
+
 /*************************/
 /* Endianness detection. */
 /*************************/
Index: sljit/sljitNativeX86_64.c
===================================================================
--- sljit/sljitNativeX86_64.c   (revision 1661)
+++ sljit/sljitNativeX86_64.c   (working copy)
@@ -35,7 +35,7 @@
        INC_SIZE(2 + sizeof(sljit_sw));
        *inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
        *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7);
-       *(sljit_sw*)inst = imm;
+       sljit_unaligned_store_sw(inst, imm);
        return SLJIT_SUCCESS;
 }
 
@@ -219,7 +219,7 @@
                *inst++ = REX_W;
                *inst++ = GROUP_BINARY_81;
                *inst++ = MOD_REG | SUB | 4;
-               *(sljit_s32*)inst = local_size;
+               sljit_unaligned_store_s32(inst, local_size);
                inst += sizeof(sljit_s32);
        }
 
@@ -292,7 +292,7 @@
                *inst++ = REX_W;
                *inst++ = GROUP_BINARY_81;
                *inst++ = MOD_REG | ADD | 4;
-               *(sljit_s32*)inst = compiler->local_size;
+               sljit_unaligned_store_s32(inst, compiler->local_size);
        }
 
        tmp = compiler->scratches;
@@ -339,7 +339,7 @@
        if (rex)
                *inst++ = rex;
        *inst++ = opcode;
-       *(sljit_s32*)inst = imm;
+       sljit_unaligned_store_s32(inst, imm);
        return SLJIT_SUCCESS;
 }
 
@@ -354,6 +354,7 @@
        sljit_u8 rex = 0;
        sljit_s32 flags = size & ~0xf;
        sljit_s32 inst_size;
+       sljit_s32 tmp_s32;
 
        /* The immediate operand must be 32 bit. */
        SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
@@ -516,7 +517,7 @@
                                if (immb <= 127 && immb >= -128)
                                        *buf_ptr++ = immb; /* 8 bit displacement. */
                                else {
-                                       *(sljit_s32*)buf_ptr = immb; /* 32 bit displacement. */
+                                       sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
                                        buf_ptr += sizeof(sljit_s32);
                                }
                        }
@@ -543,7 +544,7 @@
                else if (flags & EX86_HALF_ARG)
                        *(short*)buf_ptr = imma;
                else if (!(flags & EX86_SHIFT_INS))
-                       *(sljit_s32*)buf_ptr = imma;
+                       sljit_unaligned_store_s32(buf_ptr, imma);
        }
 
        return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
Index: sljit/sljitNativeX86_common.c
===================================================================
--- sljit/sljitNativeX86_common.c       (revision 1661)
+++ sljit/sljitNativeX86_common.c       (working copy)
@@ -534,7 +534,7 @@
                                *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sw)));
 #else
                                SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
-                               *(sljit_s32*)jump->addr = (sljit_s32)(jump->u.label->addr - (jump->addr + sizeof(sljit_s32)));
+                               sljit_unaligned_store_s32(jump->addr, (sljit_s32)(jump->u.label->addr - (jump->addr + sizeof(sljit_s32))));
 #endif
                        }
                        else {
@@ -542,7 +542,7 @@
                                *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_sw)));
 #else
                                SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
-                               *(sljit_s32*)jump->addr = (sljit_s32)(jump->u.target - (jump->addr + sizeof(sljit_s32)));
+                               sljit_unaligned_store_s32(jump->addr, (sljit_s32)(jump->u.target - (jump->addr + sizeof(sljit_s32))));
 #endif
                        }
                }
-- 
## List details at https://lists.exim.org/mailman/listinfo/pcre-dev 

Reply via email to