Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_32.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_32.c?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_32.c (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_32.c Fri Aug 24 14:36:12 2018
@@ -64,29 +64,28 @@ static sljit_u8* generate_far_jump_code(
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
-       sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+       sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
        sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
-       sljit_s32 size;
+       sljit_s32 args, size;
        sljit_u8 *inst;
 
        CHECK_ERROR();
-       CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
-       set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+       CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+       set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
 
+       args = get_arg_count(arg_types);
        compiler->args = args;
 
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-       /* [esp+0] for saving temporaries and third argument for calls. */
-       compiler->saveds_offset = 1 * sizeof(sljit_sw);
-#else
-       /* [esp+0] for saving temporaries and space for maximum three arguments. */
-       if (scratches <= 1)
-               compiler->saveds_offset = 1 * sizeof(sljit_sw);
-       else
-               compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+       /* [esp+0] for saving temporaries and function calls. */
+       compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
+
+#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       if (scratches > 3)
+               compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
 #endif
 
+       compiler->saveds_offset = compiler->stack_tmp_size;
        if (scratches > 3)
                compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
 
@@ -124,34 +123,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 
 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
        if (args > 0) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2];
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2];
+               inst += 2;
        }
        if (args > 1) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1];
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1];
+               inst += 2;
        }
        if (args > 2) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */;
-               *inst++ = 0x24;
-               *inst++ = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */;
+               inst[2] = 0x24;
+               inst[3] = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */
        }
 #else
        if (args > 0) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1];
-               *inst++ = sizeof(sljit_sw) * 2;
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1];
+               inst[2] = sizeof(sljit_sw) * 2;
+               inst += 3;
        }
        if (args > 1) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1];
-               *inst++ = sizeof(sljit_sw) * 3;
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1];
+               inst[2] = sizeof(sljit_sw) * 3;
+               inst += 3;
        }
        if (args > 2) {
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1];
-               *inst++ = sizeof(sljit_sw) * 4;
+               inst[0] = MOV_r_rm;
+               inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1];
+               inst[2] = sizeof(sljit_sw) * 4;
        }
 #endif
 
@@ -171,17 +174,36 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        compiler->local_size = local_size;
 
 #ifdef _WIN32
-       if (local_size > 1024) {
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-               FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
-#else
-               /* Space for a single argument. This amount is excluded when the stack is allocated below. */
-               local_size -= sizeof(sljit_sw);
-               FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
-               FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
-                       SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, sizeof(sljit_sw)));
-#endif
-               FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
+       if (local_size > 0) {
+               if (local_size <= 4 * 4096) {
+                       if (local_size > 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+                       if (local_size > 2 * 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
+                       if (local_size > 3 * 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
+               }
+               else {
+                       EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
+                       EMIT_MOV(compiler, SLJIT_R1, 0, SLJIT_IMM, (local_size - 1) >> 12);
+
+                       SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);
+
+                       EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_R0), -4096);
+                       FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                               SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
+                       FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                               SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1));
+
+                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+                       FAIL_IF(!inst);
+
+                       INC_SIZE(2);
+                       inst[0] = JNE_i8;
+                       inst[1] = (sljit_s8) -16;
+               }
+
+               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
        }
 #endif
 
@@ -192,12 +214,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_SP, 0);
 
                /* Some space might allocated during sljit_grow_stack() above on WIN32. */
-               FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+               FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
                        SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size + sizeof(sljit_sw)));
 
 #if defined _WIN32 && !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
                if (compiler->local_size > 1024)
-                       FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+                       FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
                                TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, sizeof(sljit_sw)));
 #endif
 
@@ -213,31 +235,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), compiler->local_size, TMP_REG1, 0);
        }
 #endif
-       return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+       return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
                SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
-       sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+       sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
        sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
        CHECK_ERROR();
-       CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
-       set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+       CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+       set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
 
-       compiler->args = args;
+       compiler->args = get_arg_count(arg_types);
 
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-       /* [esp+0] for saving temporaries and third argument for calls. */
-       compiler->saveds_offset = 1 * sizeof(sljit_sw);
-#else
-       /* [esp+0] for saving temporaries and space for maximum three arguments. */
-       if (scratches <= 1)
-               compiler->saveds_offset = 1 * sizeof(sljit_sw);
-       else
-               compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+       /* [esp+0] for saving temporaries and function calls. */
+       compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
+
+#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       if (scratches > 3)
+               compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
 #endif
 
+       compiler->saveds_offset = compiler->stack_tmp_size;
        if (scratches > 3)
                compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
 
@@ -278,10 +298,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        if (compiler->options & SLJIT_F64_ALIGNMENT)
                EMIT_MOV(compiler, SLJIT_SP, 0, SLJIT_MEM1(SLJIT_SP), compiler->local_size)
        else
-               FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+               FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
                        SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
 #else
-       FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+       FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
                SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
 #endif
 
@@ -418,7 +438,7 @@ static sljit_u8* emit_x86_instruction(st
                if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
                        *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
 
-               if ((a & SLJIT_IMM) || (a == 0))
+               if (a & SLJIT_IMM)
                        *buf_ptr = 0;
                else if (!(flags & EX86_SSE2_OP1))
                        *buf_ptr = reg_map[a] << 3;
@@ -490,42 +510,324 @@ static sljit_u8* emit_x86_instruction(st
 /*  Call / return instructions                                           */
 /* --------------------------------------------------------------------- */
 
-static SLJIT_INLINE sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 type)
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+
+static sljit_s32 c_fast_call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
 {
-       sljit_u8 *inst;
+       sljit_s32 stack_size = 0;
+       sljit_s32 word_arg_count = 0;
 
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-       inst = (sljit_u8*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
-       FAIL_IF(!inst);
-       INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);
+       arg_types >>= SLJIT_DEF_SHIFT;
+
+       while (arg_types) {
+               switch (arg_types & SLJIT_DEF_MASK) {
+               case SLJIT_ARG_TYPE_F32:
+                       stack_size += sizeof(sljit_f32);
+                       break;
+               case SLJIT_ARG_TYPE_F64:
+                       stack_size += sizeof(sljit_f64);
+                       break;
+               default:
+                       word_arg_count++;
+                       if (word_arg_count > 2)
+                               stack_size += sizeof(sljit_sw);
+                       break;
+               }
 
-       if (type >= SLJIT_CALL3)
+               arg_types >>= SLJIT_DEF_SHIFT;
+       }
+
+       if (word_arg_count_ptr)
+               *word_arg_count_ptr = word_arg_count;
+
+       return stack_size;
+}
+
+static sljit_s32 c_fast_call_with_args(struct sljit_compiler *compiler,
+       sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count, sljit_s32 swap_args)
+{
+       sljit_u8 *inst;
+       sljit_s32 float_arg_count;
+
+       if (stack_size == sizeof(sljit_sw) && word_arg_count == 3) {
+               inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+               FAIL_IF(!inst);
+               INC_SIZE(1);
                PUSH_REG(reg_map[SLJIT_R2]);
-       *inst++ = MOV_r_rm;
-       *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
+       }
+       else if (stack_size > 0) {
+               if (word_arg_count >= 4)
+                       EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+
+               FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                       SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+               stack_size = 0;
+               arg_types >>= SLJIT_DEF_SHIFT;
+               word_arg_count = 0;
+               float_arg_count = 0;
+               while (arg_types) {
+                       switch (arg_types & SLJIT_DEF_MASK) {
+                       case SLJIT_ARG_TYPE_F32:
+                               float_arg_count++;
+                               FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+                               stack_size += sizeof(sljit_f32);
+                               break;
+                       case SLJIT_ARG_TYPE_F64:
+                               float_arg_count++;
+                               FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+                               stack_size += sizeof(sljit_f64);
+                               break;
+                       default:
+                               word_arg_count++;
+                               if (word_arg_count == 3) {
+                                       EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, SLJIT_R2, 0);
+                                       stack_size += sizeof(sljit_sw);
+                               }
+                               else if (word_arg_count == 4) {
+                                       EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, TMP_REG1, 0);
+                                       stack_size += sizeof(sljit_sw);
+                               }
+                               break;
+                       }
+
+                       arg_types >>= SLJIT_DEF_SHIFT;
+               }
+       }
+
+       if (word_arg_count > 0) {
+               if (swap_args) {
+                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+                       FAIL_IF(!inst);
+                       INC_SIZE(1);
+
+                       *inst++ = XCHG_EAX_r | reg_map[SLJIT_R2];
+               }
+               else {
+                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+                       FAIL_IF(!inst);
+                       INC_SIZE(2);
+
+                       *inst++ = MOV_r_rm;
+                       *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
+               }
+       }
+
+       return SLJIT_SUCCESS;
+}
+
+#endif
+
+static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
+{
+       sljit_s32 stack_size = 0;
+       sljit_s32 word_arg_count = 0;
+
+       arg_types >>= SLJIT_DEF_SHIFT;
+
+       while (arg_types) {
+               switch (arg_types & SLJIT_DEF_MASK) {
+               case SLJIT_ARG_TYPE_F32:
+                       stack_size += sizeof(sljit_f32);
+                       break;
+               case SLJIT_ARG_TYPE_F64:
+                       stack_size += sizeof(sljit_f64);
+                       break;
+               default:
+                       word_arg_count++;
+                       stack_size += sizeof(sljit_sw);
+                       break;
+               }
+
+               arg_types >>= SLJIT_DEF_SHIFT;
+       }
+
+       if (word_arg_count_ptr)
+               *word_arg_count_ptr = word_arg_count;
+
+       if (stack_size <= compiler->stack_tmp_size)
+               return 0;
+
+#if defined(__APPLE__)
+       return ((stack_size - compiler->stack_tmp_size + 15) & ~15);
 #else
-       inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0));
+       return stack_size - compiler->stack_tmp_size;
+#endif
+}
+
+static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler,
+       sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count)
+{
+       sljit_s32 float_arg_count = 0;
+
+       if (word_arg_count >= 4)
+               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+
+       if (stack_size > 0)
+               FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                       SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+       stack_size = 0;
+       word_arg_count = 0;
+       arg_types >>= SLJIT_DEF_SHIFT;
+
+       while (arg_types) {
+               switch (arg_types & SLJIT_DEF_MASK) {
+               case SLJIT_ARG_TYPE_F32:
+                       float_arg_count++;
+                       FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+                       stack_size += sizeof(sljit_f32);
+                       break;
+               case SLJIT_ARG_TYPE_F64:
+                       float_arg_count++;
+                       FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+                       stack_size += sizeof(sljit_f64);
+                       break;
+               default:
+                       word_arg_count++;
+                       EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, (word_arg_count >= 4) ? TMP_REG1 : word_arg_count, 0);
+                       stack_size += sizeof(sljit_sw);
+                       break;
+               }
+
+               arg_types >>= SLJIT_DEF_SHIFT;
+       }
+
+       return SLJIT_SUCCESS;
+}
+
+static sljit_s32 post_call_with_args(struct sljit_compiler *compiler,
+       sljit_s32 arg_types, sljit_s32 stack_size)
+{
+       sljit_u8 *inst;
+       sljit_s32 single;
+
+       if (stack_size > 0)
+               FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
+                       SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+       if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+               return SLJIT_SUCCESS;
+
+       single = ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32);
+
+       inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
        FAIL_IF(!inst);
-       INC_SIZE(4 * (type - SLJIT_CALL0));
+       INC_SIZE(3);
+       inst[0] = single ? FSTPS : FSTPD;
+       inst[1] = (0x03 << 3) | 0x04;
+       inst[2] = (0x04 << 3) | reg_map[SLJIT_SP];
+
+       return emit_sse2_load(compiler, single, SLJIT_FR0, SLJIT_MEM1(SLJIT_SP), 0);
+}
 
-       *inst++ = MOV_rm_r;
-       *inst++ = MOD_DISP8 | (reg_map[SLJIT_R0] << 3) | 0x4 /* SIB */;
-       *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
-       *inst++ = 0;
-       if (type >= SLJIT_CALL2) {
-               *inst++ = MOV_rm_r;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_R1] << 3) | 0x4 /* SIB */;
-               *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
-               *inst++ = sizeof(sljit_sw);
-       }
-       if (type >= SLJIT_CALL3) {
-               *inst++ = MOV_rm_r;
-               *inst++ = MOD_DISP8 | (reg_map[SLJIT_R2] << 3) | 0x4 /* SIB */;
-               *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
-               *inst++ = 2 * sizeof(sljit_sw);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+       sljit_s32 arg_types)
+{
+       struct sljit_jump *jump;
+       sljit_s32 stack_size = 0;
+       sljit_s32 word_arg_count;
+
+       CHECK_ERROR_PTR();
+       CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       if ((type & 0xff) == SLJIT_CALL) {
+               stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
+               PTR_FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, 0));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+               compiler->skip_checks = 1;
+#endif
+
+               jump = sljit_emit_jump(compiler, type);
+               PTR_FAIL_IF(jump == NULL);
+
+               PTR_FAIL_IF(post_call_with_args(compiler, arg_types, 0));
+               return jump;
        }
 #endif
-       return SLJIT_SUCCESS;
+
+       stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
+       PTR_FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+       compiler->skip_checks = 1;
+#endif
+
+       jump = sljit_emit_jump(compiler, type);
+       PTR_FAIL_IF(jump == NULL);
+
+       PTR_FAIL_IF(post_call_with_args(compiler, arg_types, stack_size));
+       return jump;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+       sljit_s32 arg_types,
+       sljit_s32 src, sljit_sw srcw)
+{
+       sljit_s32 stack_size = 0;
+       sljit_s32 word_arg_count;
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       sljit_s32 swap_args;
+#endif
+
+       CHECK_ERROR();
+       CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       SLJIT_ASSERT(reg_map[SLJIT_R0] == 0 && reg_map[SLJIT_R2] == 1 && SLJIT_R0 == 1 && SLJIT_R2 == 3);
+
+       if ((type & 0xff) == SLJIT_CALL) {
+               stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
+               swap_args = 0;
+
+               if (word_arg_count > 0) {
+                       if ((src & REG_MASK) == SLJIT_R2 || OFFS_REG(src) == SLJIT_R2) {
+                               swap_args = 1;
+                               if (((src & REG_MASK) | 0x2) == SLJIT_R2)
+                                       src ^= 0x2;
+                               if ((OFFS_REG(src) | 0x2) == SLJIT_R2)
+                                       src ^= TO_OFFS_REG(0x2);
+                       }
+               }
+
+               FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, swap_args));
+
+               compiler->saveds_offset += stack_size;
+               compiler->locals_offset += stack_size;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+               compiler->skip_checks = 1;
+#endif
+               FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+               compiler->saveds_offset -= stack_size;
+               compiler->locals_offset -= stack_size;
+
+               return post_call_with_args(compiler, arg_types, 0);
+       }
+#endif
+
+       stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
+       FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+
+       compiler->saveds_offset += stack_size;
+       compiler->locals_offset += stack_size;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+       compiler->skip_checks = 1;
+#endif
+       FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+       compiler->saveds_offset -= stack_size;
+       compiler->locals_offset -= stack_size;
+
+       return post_call_with_args(compiler, arg_types, stack_size);
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
@@ -576,7 +878,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                INC_SIZE(1 + 1);
                PUSH_REG(reg_map[src]);
        }
-       else if (src & SLJIT_MEM) {
+       else {
                inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
                FAIL_IF(!inst);
                *inst++ = GROUP_FF;
@@ -586,16 +888,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                FAIL_IF(!inst);
                INC_SIZE(1);
        }
-       else {
-               /* SLJIT_IMM. */
-               inst = (sljit_u8*)ensure_buf(compiler, 1 + 5 + 1);
-               FAIL_IF(!inst);
-
-               INC_SIZE(5 + 1);
-               *inst++ = PUSH_i32;
-               sljit_unaligned_store_sw(inst, srcw);
-               inst += sizeof(sljit_sw);
-       }
 
        RET();
        return SLJIT_SUCCESS;
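
The 32-bit hunks above retire the fixed-arity SLJIT_CALL0..SLJIT_CALL3 emitters in favour of sljit_emit_call()/sljit_emit_icall(), which decode a packed arg_types word: the return type sits in the lowest SLJIT_DEF_SHIFT-wide field and each argument occupies the next field. A minimal caller-side sketch for a function shaped like sljit_sw f(sljit_sw, sljit_f64), assuming SLJIT_ARG_TYPE_SW is the word-sized type constant (only the F32/F64 constants and the field layout are visible in this patch):

    /* Illustrative fragment inside an existing sljit compilation context. */
    sljit_s32 arg_types = SLJIT_ARG_TYPE_SW                   /* return value */
        | (SLJIT_ARG_TYPE_SW << SLJIT_DEF_SHIFT)              /* 1st argument */
        | (SLJIT_ARG_TYPE_F64 << (2 * SLJIT_DEF_SHIFT));      /* 2nd argument */
    struct sljit_jump *jump = sljit_emit_call(compiler, SLJIT_CALL, arg_types);

On the fastcall path (c_fast_call_*) only word arguments beyond the second need stack slots, while the cdecl path reserves a slot for every argument; that is the whole difference between the two *_get_stack_size helpers.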

Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_64.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_64.c?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_64.c (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_64.c Fri Aug 24 14:36:12 2018
@@ -41,24 +41,31 @@ static sljit_s32 emit_load_imm64(struct
 
 static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_s32 type)
 {
+       int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);
+
+       /* The relative jump below specialized for this case. */
+       SLJIT_ASSERT(reg_map[TMP_REG2] >= 8);
+
        if (type < SLJIT_JUMP) {
                /* Invert type. */
                *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
-               *code_ptr++ = 10 + 3;
+               *code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
        }
 
-       *code_ptr++ = REX_W | ((reg_map[TMP_REG2] <= 7) ? 0 : REX_B);
+       *code_ptr++ = short_addr ? REX_B : (REX_W | REX_B);
        *code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2];
        jump->addr = (sljit_uw)code_ptr;
 
        if (jump->flags & JUMP_LABEL)
                jump->flags |= PATCH_MD;
+       else if (short_addr)
+               sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
        else
                sljit_unaligned_store_sw(code_ptr, jump->u.target);
 
-       code_ptr += sizeof(sljit_sw);
-       if (reg_map[TMP_REG2] >= 8)
-               *code_ptr++ = REX_B;
+       code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);
+
+       *code_ptr++ = REX_B;
        *code_ptr++ = GROUP_FF;
        *code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];
 
@@ -66,15 +73,17 @@ static sljit_u8* generate_far_jump_code(
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
-       sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+       sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
        sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
-       sljit_s32 i, tmp, size, saved_register_size;
+       sljit_s32 args, i, tmp, size, saved_register_size;
        sljit_u8 *inst;
 
        CHECK_ERROR();
-       CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
-       set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+       CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+       set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+       compiler->mode32 = 0;
 
 #ifdef _WIN64
        /* Two/four register slots for parameters plus space for xmm6 register if needed. */
@@ -108,6 +117,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                PUSH_REG(reg_lmap[i]);
        }
 
+       args = get_arg_count(arg_types);
+
        if (args > 0) {
                size = args * 3;
                inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
@@ -117,35 +128,39 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 
 #ifndef _WIN64
                if (args > 0) {
-                       *inst++ = REX_W;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
+                       inst[0] = REX_W;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
+                       inst += 3;
                }
                if (args > 1) {
-                       *inst++ = REX_W | REX_R;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
+                       inst[0] = REX_W | REX_R;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
+                       inst += 3;
                }
                if (args > 2) {
-                       *inst++ = REX_W | REX_R;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
+                       inst[0] = REX_W | REX_R;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
                }
 #else
                if (args > 0) {
-                       *inst++ = REX_W;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
+                       inst[0] = REX_W;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
+                       inst += 3;
                }
                if (args > 1) {
-                       *inst++ = REX_W;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
+                       inst[0] = REX_W;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
+                       inst += 3;
                }
                if (args > 2) {
-                       *inst++ = REX_W | REX_B;
-                       *inst++ = MOV_r_rm;
-                       *inst++ = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
+                       inst[0] = REX_W | REX_B;
+                       inst[1] = MOV_r_rm;
+                       inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
                }
 #endif
        }
@@ -154,58 +169,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        compiler->local_size = local_size;
 
 #ifdef _WIN64
-       if (local_size > 1024) {
-               /* Allocate stack for the callback, which grows the stack. */
-               inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + (3 + sizeof(sljit_s32)));
-               FAIL_IF(!inst);
-               INC_SIZE(4 + (3 + sizeof(sljit_s32)));
-               *inst++ = REX_W;
-               *inst++ = GROUP_BINARY_83;
-               *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
-               /* Allocated size for registers must be divisible by 8. */
-               SLJIT_ASSERT(!(saved_register_size & 0x7));
-               /* Aligned to 16 byte. */
-               if (saved_register_size & 0x8) {
-                       *inst++ = 5 * sizeof(sljit_sw);
-                       local_size -= 5 * sizeof(sljit_sw);
-               } else {
-                       *inst++ = 4 * sizeof(sljit_sw);
-                       local_size -= 4 * sizeof(sljit_sw);
-               }
-               /* Second instruction */
-               SLJIT_ASSERT(reg_map[SLJIT_R0] < 8);
-               *inst++ = REX_W;
-               *inst++ = MOV_rm_i32;
-               *inst++ = MOD_REG | reg_lmap[SLJIT_R0];
-               sljit_unaligned_store_s32(inst, local_size);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
-                       || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-               compiler->skip_checks = 1;
-#endif
-               FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
-       }
-#endif
-
        if (local_size > 0) {
-               if (local_size <= 127) {
-                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
-                       FAIL_IF(!inst);
-                       INC_SIZE(4);
-                       *inst++ = REX_W;
-                       *inst++ = GROUP_BINARY_83;
-                       *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
-                       *inst++ = local_size;
+               if (local_size <= 4 * 4096) {
+                       if (local_size > 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+                       if (local_size > 2 * 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
+                       if (local_size > 3 * 4096)
+                               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
                }
                else {
-                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
+                       EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
+                       EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12);
+
+                       SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);
+
+                       EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096);
+                       FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                               SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
+                       FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                               TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1));
+
+                       inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
                        FAIL_IF(!inst);
-                       INC_SIZE(7);
-                       *inst++ = REX_W;
-                       *inst++ = GROUP_BINARY_81;
-                       *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
-                       sljit_unaligned_store_s32(inst, local_size);
-                       inst += sizeof(sljit_s32);
+
+                       INC_SIZE(2);
+                       inst[0] = JNE_i8;
+                       inst[1] = (sljit_s8) -19;
                }
+
+               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
+       }
+#endif
+
+       if (local_size > 0) {
+               FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+                       SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
        }
 
 #ifdef _WIN64
@@ -223,14 +222,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
-       sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+       sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
        sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
        sljit_s32 saved_register_size;
 
        CHECK_ERROR();
-       CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
-       set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+       CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+       set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
 
 #ifdef _WIN64
        /* Two/four register slots for parameters plus space for xmm6 register if needed. */
@@ -414,7 +413,11 @@ static sljit_u8* emit_x86_instruction(st
                        }
                }
        }
-       else if (!(flags & EX86_SSE2_OP2) && reg_map[b] >= 8)
+       else if (!(flags & EX86_SSE2_OP2)) {
+               if (reg_map[b] >= 8)
+                       rex |= REX_B;
+       }
+       else if (freg_map[b] >= 8)
                rex |= REX_B;
 
        if (a & SLJIT_IMM) {
@@ -441,7 +444,11 @@ static sljit_u8* emit_x86_instruction(st
        else {
                SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
                /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
-               if (!(flags & EX86_SSE2_OP1) && reg_map[a] >= 8)
+               if (!(flags & EX86_SSE2_OP1)) {
+                       if (reg_map[a] >= 8)
+                               rex |= REX_R;
+               }
+               else if (freg_map[a] >= 8)
                        rex |= REX_R;
        }
 
@@ -468,12 +475,12 @@ static sljit_u8* emit_x86_instruction(st
                if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
                        *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
 
-               if ((a & SLJIT_IMM) || (a == 0))
+               if (a & SLJIT_IMM)
                        *buf_ptr = 0;
                else if (!(flags & EX86_SSE2_OP1))
                        *buf_ptr = reg_lmap[a] << 3;
                else
-                       *buf_ptr = a << 3;
+                       *buf_ptr = freg_lmap[a] << 3;
        }
        else {
                if (a & SLJIT_IMM) {
@@ -487,7 +494,7 @@ static sljit_u8* emit_x86_instruction(st
        }
 
        if (!(b & SLJIT_MEM))
-               *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : b);
+               *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
        else if ((b & REG_MASK) != SLJIT_UNUSED) {
                if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
                        if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
@@ -545,45 +552,161 @@ static sljit_u8* emit_x86_instruction(st
 /*  Call / return instructions                                           */
 /* --------------------------------------------------------------------- */
 
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 type)
+#ifndef _WIN64
+
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
 {
-       sljit_u8 *inst;
+       sljit_s32 src = src_ptr ? (*src_ptr) : 0;
+       sljit_s32 word_arg_count = 0;
 
-       /* After any change update IS_REG_CHANGED_BY_CALL as well. */
-#ifndef _WIN64
-       SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8 && reg_map[TMP_REG1] == 2);
+       SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);
+
+       compiler->mode32 = 0;
+
+       /* Remove return value. */
+       arg_types >>= SLJIT_DEF_SHIFT;
+
+       while (arg_types) {
+               if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+                       word_arg_count++;
+               arg_types >>= SLJIT_DEF_SHIFT;
+       }
+
+       if (word_arg_count == 0)
+               return SLJIT_SUCCESS;
+
+       if (src & SLJIT_MEM) {
+               ADJUST_LOCAL_OFFSET(src, srcw);
+               EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+               *src_ptr = TMP_REG2;
+       }
+       else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
+               *src_ptr = TMP_REG1;
+
+       if (word_arg_count >= 3)
+               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
+       return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
+}
 
-       inst = (sljit_u8*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6));
-       FAIL_IF(!inst);
-       INC_SIZE((type < SLJIT_CALL3) ? 3 : 6);
-       if (type >= SLJIT_CALL3) {
-               /* Move third argument to TMP_REG1. */
-               *inst++ = REX_W;
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_REG | (0x2 /* rdx */ << 3) | reg_lmap[SLJIT_R2];
-       }
-       *inst++ = REX_W;
-       *inst++ = MOV_r_rm;
-       *inst++ = MOD_REG | (0x7 /* rdi */ << 3) | reg_lmap[SLJIT_R0];
 #else
-       SLJIT_ASSERT(reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8 && reg_map[TMP_REG1] == 8);
 
-       inst = (sljit_u8*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6));
-       FAIL_IF(!inst);
-       INC_SIZE((type < SLJIT_CALL3) ? 3 : 6);
-       if (type >= SLJIT_CALL3) {
-               /* Move third argument to TMP_REG1. */
-               *inst++ = REX_W | REX_R;
-               *inst++ = MOV_r_rm;
-               *inst++ = MOD_REG | (0x0 /* r8 */ << 3) | reg_lmap[SLJIT_R2];
-       }
-       *inst++ = REX_W;
-       *inst++ = MOV_r_rm;
-       *inst++ = MOD_REG | (0x1 /* rcx */ << 3) | reg_lmap[SLJIT_R0];
-#endif
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+{
+       sljit_s32 src = src_ptr ? (*src_ptr) : 0;
+       sljit_s32 arg_count = 0;
+       sljit_s32 word_arg_count = 0;
+       sljit_s32 float_arg_count = 0;
+       sljit_s32 types = 0;
+       sljit_s32 data_trandfer = 0;
+       static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };
+
+       SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);
+
+       compiler->mode32 = 0;
+       arg_types >>= SLJIT_DEF_SHIFT;
+
+       while (arg_types) {
+               types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+               switch (arg_types & SLJIT_DEF_MASK) {
+               case SLJIT_ARG_TYPE_F32:
+               case SLJIT_ARG_TYPE_F64:
+                       arg_count++;
+                       float_arg_count++;
+
+                       if (arg_count != float_arg_count)
+                               data_trandfer = 1;
+                       break;
+               default:
+                       arg_count++;
+                       word_arg_count++;
+
+                       if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) {
+                               data_trandfer = 1;
+
+                               if (src == word_arg_regs[arg_count]) {
+                                       EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
+                                       *src_ptr = TMP_REG2;
+                               }
+                       }
+                       break;
+               }
+
+               arg_types >>= SLJIT_DEF_SHIFT;
+       }
+
+       if (!data_trandfer)
+               return SLJIT_SUCCESS;
+
+       if (src & SLJIT_MEM) {
+               ADJUST_LOCAL_OFFSET(src, srcw);
+               EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+               *src_ptr = TMP_REG2;
+       }
+
+       while (types) {
+               switch (types & SLJIT_DEF_MASK) {
+               case SLJIT_ARG_TYPE_F32:
+                       if (arg_count != float_arg_count)
+                               FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
+                       arg_count--;
+                       float_arg_count--;
+                       break;
+               case SLJIT_ARG_TYPE_F64:
+                       if (arg_count != float_arg_count)
+                               FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
+                       arg_count--;
+                       float_arg_count--;
+                       break;
+               default:
+                       if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count])
+                               EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0);
+                       arg_count--;
+                       word_arg_count--;
+                       break;
+               }
+
+               types >>= SLJIT_DEF_SHIFT;
+       }
+
        return SLJIT_SUCCESS;
 }
 
+#endif
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+       sljit_s32 arg_types)
+{
+       CHECK_ERROR_PTR();
+       CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+       PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+       compiler->skip_checks = 1;
+#endif
+
+       return sljit_emit_jump(compiler, type);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+       sljit_s32 arg_types,
+       sljit_s32 src, sljit_sw srcw)
+{
+       CHECK_ERROR();
+       CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+       FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+               || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+       compiler->skip_checks = 1;
+#endif
+
+       return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
 {
        sljit_u8 *inst;
@@ -629,11 +752,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
        ADJUST_LOCAL_OFFSET(src, srcw);
 
-       if ((src & SLJIT_IMM) && NOT_HALFWORD(srcw)) {
-               FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw));
-               src = TMP_REG1;
-       }
-
        if (FAST_IS_REG(src)) {
                if (reg_map[src] < 8) {
                        inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
@@ -651,7 +769,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                        PUSH_REG(reg_lmap[src]);
                }
        }
-       else if (src & SLJIT_MEM) {
+       else {
                /* REX_W is not necessary (src is not immediate). */
                compiler->mode32 = 1;
                inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
@@ -663,23 +781,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                FAIL_IF(!inst);
                INC_SIZE(1);
        }
-       else {
-               SLJIT_ASSERT(IS_HALFWORD(srcw));
-               /* SLJIT_IMM. */
-               inst = (sljit_u8*)ensure_buf(compiler, 1 + 5 + 1);
-               FAIL_IF(!inst);
-
-               INC_SIZE(5 + 1);
-               *inst++ = PUSH_i32;
-               sljit_unaligned_store_s32(inst, srcw);
-               inst += sizeof(sljit_s32);
-       }
 
        RET();
        return SLJIT_SUCCESS;
 }
 
-
 /* --------------------------------------------------------------------- */
 /*  Extend input                                                         */
 /* --------------------------------------------------------------------- */
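
Both the 32-bit and 64-bit prologues above now emit the Windows stack probes inline instead of calling the sljit_grow_stack() helper that the next file removes: frames up to four pages get a few fixed loads at -4096 * n, larger frames a short JNE_i8 loop that touches one 4 KiB page per iteration before the final SUB of SLJIT_SP. A rough C equivalent of what the emitted sequence does (illustrative sketch only, not code from the patch):

    /* Touch one word in every page the frame will span so the OS guard
       page commits the stack before the stack pointer is lowered. */
    static void probe_stack_pages(const volatile sljit_u8 *sp, sljit_sw local_size)
    {
        sljit_sw offset;
        for (offset = 4096; offset < local_size; offset += 4096)
            (void)sp[-offset];        /* what the emitted JNE_i8 loop does */
        (void)sp[-local_size];        /* final touch at full frame depth */
    }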

Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_common.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_common.c?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_common.c (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeX86_common.c Fri Aug 24 14:36:12 2018
@@ -26,7 +26,11 @@
 
 SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
 {
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+       return "x86" SLJIT_CPUINFO " ABI:fastcall";
+#else
        return "x86" SLJIT_CPUINFO;
+#endif
 }
 
 /*
@@ -35,7 +39,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* slj
      1 - ECX
      2 - EDX
      3 - EBX
-     4 - none
+     4 - ESP
      5 - EBP
      6 - ESI
      7 - EDI
@@ -47,7 +51,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* slj
      1 - RCX
      2 - RDX
      3 - RBX
-     4 - none
+     4 - RSP
      5 - RBP
      6 - RSI
      7 - RDI
@@ -92,23 +96,32 @@ static const sljit_u8 reg_map[SLJIT_NUMB
 #ifndef _WIN64
 /* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
 static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
-       0, 0, 6, 1, 7, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
+       0, 0, 6, 7, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
 };
 /* low-map. reg_map & 0x7. */
 static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
-       0, 0, 6, 1, 7, 0, 3,  2,  4,  5,  5,  6,  7, 3, 4, 2, 1
+       0, 0, 6, 7, 1, 0, 3,  2,  4,  5,  5,  6,  7, 3, 4, 2, 1
 };
 #else
 /* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
 static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
-       0, 0, 2, 1, 10, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 8, 9
+       0, 0, 2, 8, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 9, 10
 };
 /* low-map. reg_map & 0x7. */
 static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
-       0, 0, 2, 1, 2,  3,  4,  5,  5, 6,  7,  7, 6, 3, 4, 0, 1
+       0, 0, 2, 0, 1,  3,  4, 5,  5,  6,  7, 7, 6, 3, 4, 1,  2
 };
 #endif
 
+/* Args: xmm0-xmm3 */
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+       4, 0, 1, 2, 3, 5, 6
+};
+/* low-map. freg_map & 0x7. */
+static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+       4, 0, 1, 2, 3, 5, 6
+};
+
 #define REX_W          0x48
 #define REX_R          0x44
 #define REX_X          0x42
@@ -178,6 +191,8 @@ static const sljit_u8 reg_lmap[SLJIT_NUM
 #define CVTTSD2SI_r_xm 0x2c
 #define DIV            (/* GROUP_F7 */ 6 << 3)
 #define DIVSD_x_xm     0x5e
+#define FSTPS          0xd9
+#define FSTPD          0xdd
 #define INT3           0xcc
 #define IDIV           (/* GROUP_F7 */ 7 << 3)
 #define IMUL           (/* GROUP_F7 */ 5 << 3)
@@ -462,11 +477,7 @@ static sljit_u8* generate_near_jump_code
                code_ptr += sizeof(sljit_s8);
        } else {
                jump->flags |= PATCH_MW;
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-               code_ptr += sizeof(sljit_sw);
-#else
                code_ptr += sizeof(sljit_s32);
-#endif
        }
 
        return code_ptr;
@@ -613,9 +624,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                        get_cpu_features();
                return cpu_has_cmov;
 
-       case SLJIT_HAS_PREF_SHIFT_REG:
-               return 1;
-
        case SLJIT_HAS_SSE2:
 #if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
                if (cpu_has_sse2 == -1)
@@ -634,14 +642,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 /*  Operators                                                            */
 /* --------------------------------------------------------------------- */
 
+#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
+
 static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
-       sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+       sljit_u32 op_types,
        sljit_s32 dst, sljit_sw dstw,
        sljit_s32 src1, sljit_sw src1w,
        sljit_s32 src2, sljit_sw src2w);
 
 static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
-       sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+       sljit_u32 op_types,
        sljit_s32 dst, sljit_sw dstw,
        sljit_s32 src1, sljit_sw src1w,
        sljit_s32 src2, sljit_sw src2w);
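
The four separate opcode bytes that emit_cum_binary()/emit_non_cum_binary() used to take are now packed into a single sljit_u32 by the BINARY_OPCODE() macro above and unpacked again inside the helpers (last hunk of this diff). Sketch of the round trip for ADD; the shifts mirror the unpacking code exactly:

    sljit_u32 op_types  = BINARY_OPCODE(ADD);        /* packs the four opcode bytes */
    sljit_u8 op_eax_imm = op_types >> 24;            /* ADD_EAX_i32 */
    sljit_u8 op_rm      = (op_types >> 16) & 0xff;   /* ADD_r_rm    */
    sljit_u8 op_mr      = (op_types >> 8) & 0xff;    /* ADD_rm_r    */
    sljit_u8 op_imm     = op_types & 0xff;           /* ADD         */
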
@@ -653,22 +663,11 @@ static sljit_s32 emit_mov(struct sljit_c
 #define EMIT_MOV(compiler, dst, dstw, src, srcw) \
        FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
 
-#ifdef _WIN32
-#include <malloc.h>
-
-static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size)
-{
-       /* Workaround for calling the internal _chkstk() function on Windows.
-       This function touches all 4k pages belongs to the requested stack space,
-       which size is passed in local_size. This is necessary on Windows where
-       the stack can only grow in 4k steps. However, this function just burn
-       CPU cycles if the stack is large enough. However, you don't know it in
-       advance, so it must always be called. I think this is a bad design in
-       general even if it has some reasons. */
-       *(volatile sljit_s32*)alloca(local_size) = 0;
-}
+static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
+       sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src);
 
-#endif
+static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
+       sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 #include "sljitNativeX86_32.c"
@@ -1115,7 +1114,7 @@ static sljit_s32 emit_unary(struct sljit
                return SLJIT_SUCCESS;
        }
 
-       if (dst == SLJIT_UNUSED)
+       if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
                dst = TMP_REG1;
 
        if (FAST_IS_REG(dst)) {
@@ -1182,12 +1181,6 @@ static sljit_s32 emit_clz(struct sljit_c
 
        SLJIT_UNUSED_ARG(op_flags);
 
-       if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
-               EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
-               src = TMP_REG1;
-               srcw = 0;
-       }
-
        if (cpu_has_cmov == -1)
                get_cpu_features();
 
@@ -1242,13 +1235,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        sljit_s32 dst, sljit_sw dstw,
        sljit_s32 src, sljit_sw srcw)
 {
-       sljit_s32 update = 0;
        sljit_s32 op_flags = GET_ALL_FLAGS(op);
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
        sljit_s32 dst_is_ereg = 0;
-       sljit_s32 src_is_ereg = 0;
-#else
-#      define src_is_ereg 0
 #endif
 
        CHECK_ERROR();
@@ -1257,7 +1246,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        ADJUST_LOCAL_OFFSET(src, srcw);
 
        CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
-       CHECK_EXTRA_REGS(src, srcw, src_is_ereg = 1);
+       CHECK_EXTRA_REGS(src, srcw, (void)0);
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
        compiler->mode32 = op_flags & SLJIT_I32_OP;
 #endif
@@ -1270,32 +1259,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 
        op = GET_OPCODE(op);
 
-       if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) {
+       if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
                compiler->mode32 = 0;
 #endif
 
-               if (op_flags & SLJIT_I32_OP) {
-                       if (FAST_IS_REG(src) && src == dst) {
-                               if (!TYPE_CAST_NEEDED(op))
-                                       return SLJIT_SUCCESS;
-                       }
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-                       if (op == SLJIT_MOV_S32 && (src & SLJIT_MEM))
-                               op = SLJIT_MOV_U32;
-                       if (op == SLJIT_MOVU_S32 && (src & SLJIT_MEM))
-                               op = SLJIT_MOVU_U32;
-                       if (op == SLJIT_MOV_U32 && (src & SLJIT_IMM))
-                               op = SLJIT_MOV_S32;
-                       if (op == SLJIT_MOVU_U32 && (src & SLJIT_IMM))
-                               op = SLJIT_MOVU_S32;
-#endif
+               if (FAST_IS_REG(src) && src == dst) {
+                       if (!TYPE_CAST_NEEDED(op))
+                               return SLJIT_SUCCESS;
                }
 
-               SLJIT_COMPILE_ASSERT(SLJIT_MOV + 8 == SLJIT_MOVU, movu_offset);
-               if (op >= SLJIT_MOVU) {
-                       update = 1;
-                       op -= 8;
+               if (op_flags & SLJIT_I32_OP) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+                       if (src & SLJIT_MEM) {
+                               if (op == SLJIT_MOV_S32)
+                                       op = SLJIT_MOV_U32;
+                       }
+                       else if (src & SLJIT_IMM) {
+                               if (op == SLJIT_MOV_U32)
+                                       op = SLJIT_MOV_S32;
+                       }
+#endif
                }
 
                if (src & SLJIT_IMM) {
@@ -1369,28 +1353,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1)
                        return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0);
 #endif
-
-               if (SLJIT_UNLIKELY(update) && (src & SLJIT_MEM) && !src_is_ereg && (src & REG_MASK)) {
-                       if ((src & OFFS_REG_MASK) != 0) {
-                               FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
-                                               (src & REG_MASK), 0, (src & REG_MASK), 0, OFFS_REG(dst), 0));
-                       }
-                       else if (srcw != 0) {
-                               FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
-                                               (src & REG_MASK), 0, (src & REG_MASK), 0, SLJIT_IMM, srcw));
-                       }
-               }
-
-               if (SLJIT_UNLIKELY(update) && (dst & SLJIT_MEM) && (dst & REG_MASK)) {
-                       if ((dst & OFFS_REG_MASK) != 0) {
-                               FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
-                                               (dst & REG_MASK), 0, (dst & REG_MASK), 0, OFFS_REG(dst), 0));
-                       }
-                       else if (dstw != 0) {
-                               FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
-                                               (dst & REG_MASK), 0, (dst & REG_MASK), 0, SLJIT_IMM, dstw));
-                       }
-               }
                return SLJIT_SUCCESS;
        }
 
@@ -1408,10 +1370,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
        }
 
        return SLJIT_SUCCESS;
-
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#      undef src_is_ereg
-#endif
 }
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -1445,12 +1403,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 #endif
 
 static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
-       sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+       sljit_u32 op_types,
        sljit_s32 dst, sljit_sw dstw,
        sljit_s32 src1, sljit_sw src1w,
        sljit_s32 src2, sljit_sw src2w)
 {
        sljit_u8* inst;
+       sljit_u8 op_eax_imm = (op_types >> 24);
+       sljit_u8 op_rm = (op_types >> 16) & 0xff;
+       sljit_u8 op_mr = (op_types >> 8) & 0xff;
+       sljit_u8 op_imm = op_types & 0xff;
 
        if (dst == SLJIT_UNUSED) {
                EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
@@ -1561,12 +1523,16 @@ static sljit_s32 emit_cum_binary(struct
 }
 
 static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
-       sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+       sljit_u32 op_types,
        sljit_s32 dst, sljit_sw dstw,
        sljit_s32 src1, sljit_sw src1w,
        sljit_s32 src2, sljit_sw src2w)
 {
        sljit_u8* inst;
+       sljit_u8 op_eax_imm = (op_types >> 24);
+       sljit_u8 op_rm = (op_types >> 16) & 0xff;
+       sljit_u8 op_mr = (op_types >> 8) & 0xff;
+       sljit_u8 op_imm = op_types & 0xff;
 
        if (dst == SLJIT_UNUSED) {
                EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
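
The unpacking just above shows the new calling convention of the binary emitters: instead of four separate sljit_u8 opcode arguments, the four variants are packed most-significant-byte-first into one sljit_u32 (bits 31-24 = op_eax_imm, 23-16 = op_rm, 15-8 = op_mr, 7-0 = op_imm) and passed as BINARY_OPCODE(...) by the call sites further down. The BINARY_OPCODE macro itself is not part of this excerpt; a minimal packing sketch consistent with that layout (the helper macro names are illustrative, only the *_r_rm/*_rm_r/*_EAX_i32 opcode constants come from the old call sites) would be:

/* Illustrative packing helper matching the shifts/masks used by
   emit_cum_binary() and emit_non_cum_binary() above. */
#define PACK_BINARY_OPCODE(eax_imm, r_rm, rm_r, imm) \
	(((sljit_u32)(eax_imm) << 24) | ((sljit_u32)(r_rm) << 16) \
	| ((sljit_u32)(rm_r) << 8) | (sljit_u32)(imm))

/* e.g. for SLJIT_ADD the old argument list (ADD_r_rm, ADD_rm_r, ADD,
   ADD_EAX_i32) collapses to a single word: */
#define ADD_OP_TYPES PACK_BINARY_OPCODE(ADD_EAX_i32, ADD_r_rm, ADD_rm_r, ADD)
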
@@ -2044,7 +2010,7 @@ static sljit_s32 emit_shift(struct sljit
                *inst |= mode;
                EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
        }
-       else if (FAST_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+       else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
                if (src1 != dst)
                        EMIT_MOV(compiler, dst, 0, src1, src1w);
                EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
@@ -2057,27 +2023,24 @@ static sljit_s32 emit_shift(struct sljit
        else {
                /* This case is complex since ecx itself may be used for
                   addressing, and this case must be supported as well. */
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
                EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
                EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
                EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
                inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
                FAIL_IF(!inst);
                *inst |= mode;
                EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
-               EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
 #else
-               EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
-               EMIT_MOV(compiler, TMP_REG2, 0, src2, src2w);
-               inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
-               FAIL_IF(!inst);
-               *inst = XCHG_r_rm;
+               EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+               EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
                inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
                FAIL_IF(!inst);
                *inst |= mode;
                EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
-               EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
 #endif
+               if (dst != SLJIT_UNUSED)
+                       return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
        }
 
        return SLJIT_SUCCESS;
@@ -2101,7 +2064,7 @@ static sljit_s32 emit_shift_with_flags(s
                if (!set_flags)
                        return emit_mov(compiler, dst, dstw, src1, src1w);
                /* OR dst, src, 0 */
-               return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(OR),
                        dst, dstw, src1, src1w, SLJIT_IMM, 0);
        }
 
@@ -2111,10 +2074,10 @@ static sljit_s32 emit_shift_with_flags(s
        if (!FAST_IS_REG(dst))
                FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0));
 
-       FAIL_IF(emit_shift(compiler,mode, dst, dstw, src1, src1w, src2, src2w));
+       FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
 
        if (FAST_IS_REG(dst))
-               return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0);
+               return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
        return SLJIT_SUCCESS;
 }
 
@@ -2145,10 +2108,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
                        if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED)
                                return compiler->error;
                }
-               return emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(ADD),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_ADDC:
-               return emit_cum_binary(compiler, ADC_r_rm, ADC_rm_r, ADC, ADC_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_SUB:
                if (!HAS_FLAGS(op)) {
@@ -2158,23 +2121,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 
                if (dst == SLJIT_UNUSED)
                        return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
-               return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+               return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_SUBC:
-               return emit_non_cum_binary(compiler, SBB_r_rm, SBB_rm_r, SBB, SBB_EAX_i32,
+               return emit_non_cum_binary(compiler, BINARY_OPCODE(SBB),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_MUL:
                return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_AND:
                if (dst == SLJIT_UNUSED)
                        return emit_test_binary(compiler, src1, src1w, src2, src2w);
-               return emit_cum_binary(compiler, AND_r_rm, AND_rm_r, AND, AND_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(AND),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_OR:
-               return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(OR),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_XOR:
-               return emit_cum_binary(compiler, XOR_r_rm, XOR_rm_r, XOR, XOR_EAX_i32,
+               return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
                        dst, dstw, src1, src1w, src2, src2w);
        case SLJIT_SHL:
                return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
@@ -2203,7 +2166,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
 {
        CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
        return reg;
+#else
+       return freg_map[reg];
+#endif
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -2345,6 +2312,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit
                FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
                src1 = TMP_FREG;
        }
+
        return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
 }
 
@@ -2516,9 +2484,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_ju
        set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
        type &= 0xff;
 
-       if (type >= SLJIT_CALL1)
-               PTR_FAIL_IF(call_with_args(compiler, type));
-
        /* Worst case size. */
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
        compiler->size += (type >= SLJIT_JUMP) ? 5 : 6;
@@ -2534,14 +2499,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_ju
        return jump;
 }
 
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#ifndef _WIN64
-#define IS_REG_CHANGED_BY_CALL(src, type) ((src) == SLJIT_R3)
-#else
-#define IS_REG_CHANGED_BY_CALL(src, type) ((src) == SLJIT_R2)
-#endif
-#endif
-
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
 {
        sljit_u8 *inst;
@@ -2553,25 +2510,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit
 
        CHECK_EXTRA_REGS(src, srcw, (void)0);
 
-       if (type >= SLJIT_CALL1) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-               if (src == SLJIT_R2) {
-                       EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
-                       src = TMP_REG1;
-               }
-               if (src == SLJIT_MEM1(SLJIT_SP) && type >= SLJIT_CALL3)
-                       srcw += sizeof(sljit_sw);
-#endif
-#else
-               if ((src & SLJIT_MEM) || IS_REG_CHANGED_BY_CALL(src, type)) {
-                       EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
-                       src = TMP_REG2;
-               }
-#endif
-               FAIL_IF(call_with_args(compiler, type));
-       }
-
        if (src == SLJIT_IMM) {
                jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
                FAIL_IF_NULL(jump);

Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitUtils.c
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitUtils.c?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitUtils.c (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitUtils.c Fri Aug 24 14:36:12 2018
@@ -48,12 +48,12 @@ static SLJIT_INLINE void allocator_relea
 
 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
 {
        /* Always successful. */
 }
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
 {
        /* Always successful. */
 }
@@ -88,7 +88,7 @@ static SLJIT_INLINE void allocator_relea
 
 static HANDLE global_mutex = 0;
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
 {
        /* No idea what to do if an error occures. Static mutexes should never fail... */
        if (!global_mutex)
@@ -97,7 +97,7 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL
                WaitForSingleObject(global_mutex, INFINITE);
 }
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
 {
        ReleaseMutex(global_mutex);
 }
@@ -130,12 +130,12 @@ static SLJIT_INLINE void allocator_relea
 
 static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
 {
        pthread_mutex_lock(&global_mutex);
 }
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
 {
        pthread_mutex_unlock(&global_mutex);
 }
@@ -203,7 +203,7 @@ static SLJIT_INLINE sljit_s32 open_dev_z
 /* Planning to make it even more clever in the future. */
 static sljit_sw sljit_page_align = 0;
 
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data)
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data)
 {
        struct sljit_stack *stack;
        void *ptr;
@@ -212,7 +212,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_st
 #endif
 
        SLJIT_UNUSED_ARG(allocator_data);
-       if (limit > max_limit || limit < 1)
+       if (start_size > max_size || start_size < 1)
                return NULL;
 
 #ifdef _WIN32
@@ -234,25 +234,27 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_st
        if (!stack)
                return NULL;
 
-       /* Align max_limit. */
-       max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;
+       /* Align max_size. */
+       max_size = (max_size + sljit_page_align) & ~sljit_page_align;
 
 #ifdef _WIN32
-       ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
+       ptr = VirtualAlloc(NULL, max_size, MEM_RESERVE, PAGE_READWRITE);
        if (!ptr) {
                SLJIT_FREE(stack, allocator_data);
                return NULL;
        }
-       stack->max_limit = (sljit_u8 *)ptr;
-       stack->base = stack->max_limit + max_limit;
-       stack->limit = stack->base;
-       if (sljit_stack_resize(stack, stack->base - limit)) {
+
+       stack->min_start = (sljit_u8 *)ptr;
+       stack->end = stack->min_start + max_size;
+       stack->start = stack->end;
+
+       if (sljit_stack_resize(stack, stack->end - start_size) == NULL) {
                sljit_free_stack(stack, allocator_data);
                return NULL;
        }
 #else
 #ifdef MAP_ANON
-       ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+       ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
 #else
        if (dev_zero < 0) {
                if (open_dev_zero()) {
@@ -260,73 +262,70 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_st
                        return NULL;
                }
        }
-       ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
+       ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
 #endif
        if (ptr == MAP_FAILED) {
                SLJIT_FREE(stack, allocator_data);
                return NULL;
        }
-       stack->max_limit = (sljit_u8 *)ptr;
-       stack->base = stack->max_limit + max_limit;
-       stack->limit = stack->base - limit;
+       stack->min_start = (sljit_u8 *)ptr;
+       stack->end = stack->min_start + max_size;
+       stack->start = stack->end - start_size;
 #endif
-       stack->top = stack->base;
+       stack->top = stack->end;
        return stack;
 }
 
 #undef PAGE_ALIGN
 
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
 {
        SLJIT_UNUSED_ARG(allocator_data);
 #ifdef _WIN32
-       VirtualFree((void*)stack->max_limit, 0, MEM_RELEASE);
+       VirtualFree((void*)stack->min_start, 0, MEM_RELEASE);
 #else
-       munmap((void*)stack->max_limit, stack->base - stack->max_limit);
+       munmap((void*)stack->min_start, stack->end - stack->min_start);
 #endif
        SLJIT_FREE(stack, allocator_data);
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_limit)
+SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start)
 {
-       sljit_uw aligned_old_limit;
-       sljit_uw aligned_new_limit;
+       sljit_uw aligned_old_start;
+       sljit_uw aligned_new_start;
+
+       if ((new_start < stack->min_start) || (new_start >= stack->end))
+               return NULL;
 
-       if ((new_limit < stack->max_limit) || (new_limit >= stack->base))
-               return -1;
 #ifdef _WIN32
-       aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
-       aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
-       if (aligned_new_limit != aligned_old_limit) {
-               if (aligned_new_limit < aligned_old_limit) {
-                       if (!VirtualAlloc((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_COMMIT, PAGE_READWRITE))
-                               return -1;
+       aligned_new_start = (sljit_uw)new_start & ~sljit_page_align;
+       aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align;
+       if (aligned_new_start != aligned_old_start) {
+               if (aligned_new_start < aligned_old_start) {
+                       if (!VirtualAlloc((void*)aligned_new_start, aligned_old_start - aligned_new_start, MEM_COMMIT, PAGE_READWRITE))
+                               return NULL;
                }
                else {
-                       if (!VirtualFree((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_DECOMMIT))
-                               return -1;
+                       if (!VirtualFree((void*)aligned_old_start, aligned_new_start - aligned_old_start, MEM_DECOMMIT))
+                               return NULL;
                }
        }
-       stack->limit = new_limit;
-       return 0;
 #else
-       if (new_limit <= stack->limit) {
-               stack->limit = new_limit;
-               return 0;
-       }
-       aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
-       aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
-       /* If madvise is available, we release the unnecessary space. */
+       if (stack->start < new_start) {
+               aligned_new_start = (sljit_uw)new_start & ~sljit_page_align;
+               aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align;
+               /* If madvise is available, we release the unnecessary space. */
 #if defined(MADV_DONTNEED)
-       if (aligned_new_limit > aligned_old_limit)
-               madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MADV_DONTNEED);
+               if (aligned_new_start > aligned_old_start)
+                       madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, MADV_DONTNEED);
 #elif defined(POSIX_MADV_DONTNEED)
-       if (aligned_new_limit > aligned_old_limit)
-               posix_madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, POSIX_MADV_DONTNEED);
+               if (aligned_new_start > aligned_old_start)
+                       posix_madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, POSIX_MADV_DONTNEED);
 #endif
-       stack->limit = new_limit;
-       return 0;
+       }
 #endif
+       stack->start = new_start;
+       return new_start;
 }
 
 #endif /* SLJIT_UTIL_STACK */
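
For reference, the reshaped stack API above (sljit_allocate_stack() now taking start_size/max_size, sljit_stack_resize() returning the accepted start pointer or NULL instead of an error code, and the limit/base/max_limit fields renamed to start/end/min_start) can be exercised roughly as below. This is a minimal usage sketch based only on the signatures visible in this diff; the sljitLir.h include and the concrete sizes are assumptions, not taken from the commit.

/* Usage sketch: reserve a 1 MiB downward-growing stack with 64 KiB
   initially committed, grow it, shrink it, then free it. */
#include "sljitLir.h"   /* assumed public header exposing the stack API */

static int stack_demo(void *allocator_data)
{
	struct sljit_stack *stack = sljit_allocate_stack(64 * 1024, 1024 * 1024, allocator_data);

	if (stack == NULL)
		return -1;

	/* Grow: move the start pointer downwards; NULL means the resize failed. */
	if (sljit_stack_resize(stack, stack->end - 256 * 1024) == NULL) {
		sljit_free_stack(stack, allocator_data);
		return -1;
	}

	/* Shrink: on POSIX the released pages may be returned via madvise(). */
	sljit_stack_resize(stack, stack->end - 64 * 1024);

	sljit_free_stack(stack, allocator_data);
	return 0;
}
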

Modified: tomcat/jk/trunk/native/iis/pcre/test-driver
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/test-driver?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/test-driver (original)
+++ tomcat/jk/trunk/native/iis/pcre/test-driver Fri Aug 24 14:36:12 2018
@@ -1,9 +1,9 @@
 #! /bin/sh
 # test-driver - basic testsuite driver script.
 
-scriptversion=2013-07-13.22; # UTC
+scriptversion=2016-01-11.22; # UTC
 
-# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+# Copyright (C) 2011-2017 Free Software Foundation, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -143,6 +143,6 @@ echo ":copy-in-global-log: $gcopy" >> $t
 # eval: (add-hook 'write-file-hooks 'time-stamp)
 # time-stamp-start: "scriptversion="
 # time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-time-zone: "UTC"
+# time-stamp-time-zone: "UTC0"
 # time-stamp-end: "; # UTC"
 # End:

Modified: tomcat/jk/trunk/native/iis/pcre/testdata/testinput2
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/testdata/testinput2?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
Binary files - no diff available.

Modified: tomcat/jk/trunk/native/iis/pcre/testdata/testinput5
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/testdata/testinput5?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
Binary files - no diff available.

Modified: tomcat/jk/trunk/native/iis/pcre/testdata/testoutput2
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/testdata/testoutput2?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
Binary files - no diff available.

Modified: tomcat/jk/trunk/native/iis/pcre/testdata/testoutput5
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/testdata/testoutput5?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
Binary files - no diff available.

Modified: tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml
URL: 
http://svn.apache.org/viewvc/tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml?rev=1838867&r1=1838866&r2=1838867&view=diff
==============================================================================
--- tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml (original)
+++ tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml Fri Aug 24 14:36:12 2018
@@ -88,6 +88,9 @@
         now effectively hard-coded to CollpaseSlashesAll due to the changes
         made to align normalization with that implemented in Tomcat. (markt)
       </add>
+      <update>
+        Update PCRE bundled with the ISAPI redirector to 8.42. (rjung)
+      </update>
    </changelog>
   </subsection>
 </section>


