This cleans up the handling of 64-bit stack slots and fixes the buggy spilling code. Previously we always allocated a 32-bit spill slot regardless of the register type, which caused memory corruption when spilling 64-bit values.
Signed-off-by: Tomek Grabiec <tgrab...@gmail.com> --- arch/mmix/include/arch/instruction.h | 15 +----- arch/mmix/instruction.c | 12 +++++ arch/x86/emit-code.c | 4 +- arch/x86/include/arch/instruction.h | 19 +------ arch/x86/include/arch/stack-frame.h | 1 + arch/x86/insn-selector.brg | 57 +++++------------------ arch/x86/instruction.c | 85 ++++++++++++++++++++++++++++++++++ arch/x86/stack-frame.c | 15 ++++-- include/jit/stack-slot.h | 4 ++ include/vm/types.h | 7 +++ jit/spill-reload.c | 42 +++++++---------- jit/stack-slot.c | 10 ++++ 12 files changed, 168 insertions(+), 103 deletions(-) diff --git a/arch/mmix/include/arch/instruction.h b/arch/mmix/include/arch/instruction.h index 2fe1686..6846eb3 100644 --- a/arch/mmix/include/arch/instruction.h +++ b/arch/mmix/include/arch/instruction.h @@ -78,6 +78,9 @@ struct insn *ld_insn(enum insn_type, struct stack_slot *, struct var_info *); * instructions. */ +int insert_copy_slot_32_insns(struct stack_slot *, struct stack_slot *, struct list_head *, unsigned long); +int insert_copy_slot_64_insns(struct stack_slot *, struct stack_slot *, struct list_head *, unsigned long); + static inline struct insn * spill_insn(struct var_info *var, struct stack_slot *slot) { @@ -91,18 +94,6 @@ reload_insn(struct stack_slot *slot, struct var_info *var) } static inline struct insn * -push_slot_insn(struct stack_slot *slot) -{ - return NULL; -} - -static inline struct insn * -pop_slot_insn(struct stack_slot *slot) -{ - return NULL; -} - -static inline struct insn * exception_spill_insn(struct stack_slot *slot) { return NULL; diff --git a/arch/mmix/instruction.c b/arch/mmix/instruction.c index f79a2ca..baa4262 100644 --- a/arch/mmix/instruction.c +++ b/arch/mmix/instruction.c @@ -122,3 +122,15 @@ struct insn *ld_insn(enum insn_type insn_type, struct stack_slot *slot, struct v } return insn; } + +int insert_copy_slot_32_insns(struct stack_slot *from, struct stack_slot *to, + struct list_head *add_before, unsigned long bc_offset) +{ + return 0; 
+} + +int insert_copy_slot_64_insns(struct stack_slot *from, struct stack_slot *to, + struct list_head *add_before, unsigned long bc_offset) +{ + return 0; +} diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c index a0b85e0..2253202 100644 --- a/arch/x86/emit-code.c +++ b/arch/x86/emit-code.c @@ -734,7 +734,7 @@ emit_mov_64_memlocal_xmm(struct buffer *buf, struct operand *src, struct operand unsigned long disp; dest_reg = mach_reg(&dest->reg); - disp = slot_offset(src->slot); + disp = slot_offset_64(src->slot); emit(buf, 0xf2); emit(buf, 0x0f); @@ -905,7 +905,7 @@ static void emit_mov_64_xmm_memlocal(struct buffer *buf, struct operand *src, unsigned long disp; int mod; - disp = slot_offset(dest->slot); + disp = slot_offset_64(dest->slot); if (is_imm_8(disp)) mod = 0x01; diff --git a/arch/x86/include/arch/instruction.h b/arch/x86/include/arch/instruction.h index c33bafa..063e857 100644 --- a/arch/x86/include/arch/instruction.h +++ b/arch/x86/include/arch/instruction.h @@ -240,6 +240,9 @@ struct insn *membase_insn(enum insn_type, struct var_info *, long); * instructions. 
*/ +int insert_copy_slot_32_insns(struct stack_slot *, struct stack_slot *, struct list_head *, unsigned long); +int insert_copy_slot_64_insns(struct stack_slot *, struct stack_slot *, struct list_head *, unsigned long); + static inline struct insn * spill_insn(struct var_info *var, struct stack_slot *slot) { @@ -282,22 +285,6 @@ reload_insn(struct stack_slot *slot, struct var_info *var) return memlocal_reg_insn(insn_type, slot, var); } -static inline struct insn * -push_slot_insn(struct stack_slot *from) -{ - assert(from != NULL); - - return memlocal_insn(INSN_PUSH_MEMLOCAL, from); -} - -static inline struct insn * -pop_slot_insn(struct stack_slot *to) -{ - assert(to != NULL); - - return memlocal_insn(INSN_POP_MEMLOCAL, to); -} - static inline struct insn *jump_insn(struct basic_block *bb) { return branch_insn(INSN_JMP_BRANCH, bb); diff --git a/arch/x86/include/arch/stack-frame.h b/arch/x86/include/arch/stack-frame.h index b0b42a2..bf69b27 100644 --- a/arch/x86/include/arch/stack-frame.h +++ b/arch/x86/include/arch/stack-frame.h @@ -43,6 +43,7 @@ struct jit_stack_frame { unsigned long frame_local_offset(struct vm_method *, struct expression *); unsigned long slot_offset(struct stack_slot *slot); +unsigned long slot_offset_64(struct stack_slot *slot); unsigned long frame_locals_size(struct stack_frame *frame); unsigned long cu_frame_locals_offset(struct compilation_unit *cu); diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg index c4f2ccf..44304f5 100644 --- a/arch/x86/insn-selector.brg +++ b/arch/x86/insn-selector.brg @@ -93,30 +93,6 @@ static void method_args_cleanup(struct basic_block *bb, struct tree_node *tree, select_insn(bb, tree, imm_reg_insn(INSN_ADD_IMM_REG, args_size, stack_ptr)); } -/** - * Returns the local slot for 64-bit value. On x86 such value requires - * 2 stack slots. We must return the one which has lower memory - * address. 
- */ -static struct stack_slot * -get_local_slot_64(struct stack_frame *frame, unsigned long idx) -{ -#ifdef CONFIG_X86_32 - struct stack_slot *s1, *s2; - - s1 = get_local_slot(frame, idx); - s2 = get_local_slot(frame, idx + 1); - - if ((long) slot_offset(s1) < 0) - return s2; /* Local variable, decreasing addresses. */ - - /* This slot corresponds to call argument, increasing addresses. */ - return s1; -#else - return get_local_slot(frame, idx); -#endif -} - struct _MBState; static void __binop_reg_local(struct _MBState *, struct basic_block *, struct tree_node *, enum insn_type, struct var_info *, long); @@ -278,13 +254,12 @@ freg: EXPR_FLOAT_LOCAL 0 result = get_var(s->b_parent, expr->vm_type); state->reg1 = result; - if (expr->vm_type == J_FLOAT) { - slot = get_local_slot(cu->stack_frame, expr->local_index); + slot = get_local_slot(cu->stack_frame, expr->local_index); + + if (expr->vm_type == J_FLOAT) select_insn(s, tree, memlocal_reg_insn(INSN_MOV_MEMLOCAL_XMM, slot, result)); - } else { - slot = get_local_slot_64(cu->stack_frame, expr->local_index); + else select_insn(s, tree, memlocal_reg_insn(INSN_MOV_64_MEMLOCAL_XMM, slot, result)); - } } reg: EXPR_TEMPORARY 0 @@ -2211,13 +2186,12 @@ stmt: STMT_STORE(EXPR_FLOAT_LOCAL, freg) stmt = to_stmt(tree); local = to_expr(stmt->store_dest); - if (local->vm_type == J_FLOAT) { - slot = get_local_slot(cu->stack_frame, local->local_index); + slot = get_local_slot(cu->stack_frame, local->local_index); + + if (local->vm_type == J_FLOAT) select_insn(s, tree, reg_memlocal_insn(INSN_MOV_XMM_MEMLOCAL, src, slot)); - } else { - slot = get_local_slot_64(cu->stack_frame, local->local_index); + else select_insn(s, tree, reg_memlocal_insn(INSN_MOV_64_XMM_MEMLOCAL, src, slot)); - } } %ifdef CONFIG_X86_32 @@ -2258,14 +2232,12 @@ stmt: STMT_STORE(EXPR_FLOAT_TEMPORARY, EXPR_FLOAT_LOCAL) 1 exprdest = to_expr(stmt->store_dest); dest = exprdest->tmp_low; + slot = get_local_slot(s->b_parent->stack_frame, exprsrc->local_index); - if 
(exprsrc->vm_type == J_FLOAT) { - slot = get_local_slot(s->b_parent->stack_frame, exprsrc->local_index); + if (exprsrc->vm_type == J_FLOAT) select_insn(s, tree, memlocal_reg_insn(INSN_MOV_MEMLOCAL_XMM, slot, dest)); - } else { - slot = get_local_slot_64(s->b_parent->stack_frame, exprsrc->local_index); + else select_insn(s, tree, memlocal_reg_insn(INSN_MOV_64_MEMLOCAL_XMM, slot, dest)); - } } %else stmt: STMT_STORE(EXPR_TEMPORARY, EXPR_LOCAL) 1 @@ -2754,12 +2726,7 @@ static void __binop_reg_local(struct _MBState *state, struct basic_block *bb, expr = to_expr(tree); disp = frame_local_offset(bb->b_parent->method, to_expr(expr->binary_right)); - if (disp < 0) - /* local stack slot, decreasing offset values */ - disp -= disp_offset; - else - /* method argument slot, increasing offset values */ - disp += disp_offset; + disp += disp_offset; frame_ptr = bb->b_parent->frame_ptr; diff --git a/arch/x86/instruction.c b/arch/x86/instruction.c index 0b1e145..acf994b 100644 --- a/arch/x86/instruction.c +++ b/arch/x86/instruction.c @@ -384,3 +384,88 @@ struct insn *membase_insn(enum insn_type insn_type, struct var_info *src_base_re } return insn; } + +int insert_copy_slot_32_insns(struct stack_slot *from, struct stack_slot *to, + struct list_head *add_before, unsigned long bc_offset) +{ + struct insn *push; + struct insn *pop; + + assert(from); + assert(to); + + push = memlocal_insn(INSN_PUSH_MEMLOCAL, from); + if (!push) + return -1; + + pop = memlocal_insn(INSN_POP_MEMLOCAL, to); + if (!pop) { + free_insn(push); + return -1; + } + + push->bytecode_offset = bc_offset; + pop->bytecode_offset = bc_offset; + + list_add_tail(&push->insn_list_node, add_before); + list_add(&pop->insn_list_node, &push->insn_list_node); + return 0; +} + +#ifdef CONFIG_X86_32 + +int insert_copy_slot_64_insns(struct stack_slot *from, struct stack_slot *to, + struct list_head *add_before, unsigned long bc_offset) +{ + struct insn *push_lo, *push_hi; + struct insn *pop_lo, *pop_hi; + + assert(from); + 
assert(to); + + push_hi = memlocal_insn(INSN_PUSH_MEMLOCAL, from); + if (!push_hi) + goto fail_push_hi; + + push_lo = memlocal_insn(INSN_PUSH_MEMLOCAL, get_next_slot(from)); + if (!push_lo) + goto fail_push_lo; + + pop_hi = memlocal_insn(INSN_POP_MEMLOCAL, to); + if (!pop_hi) + goto fail_pop_hi; + + pop_lo = memlocal_insn(INSN_POP_MEMLOCAL, get_next_slot(to)); + if (!pop_lo) + goto fail_pop_lo; + + push_lo->bytecode_offset = bc_offset; + push_hi->bytecode_offset = bc_offset; + pop_lo->bytecode_offset = bc_offset; + pop_hi->bytecode_offset = bc_offset; + + list_add_tail(&push_lo->insn_list_node, add_before); + list_add(&push_hi->insn_list_node, &push_lo->insn_list_node); + list_add(&pop_hi->insn_list_node, &push_hi->insn_list_node); + list_add(&pop_lo->insn_list_node, &pop_hi->insn_list_node); + return 0; + + fail_pop_lo: + free_insn(pop_hi); + fail_pop_hi: + free_insn(push_lo); + fail_push_lo: + free_insn(push_hi); + fail_push_hi: + return -1; +} + +#else + +int insert_copy_slot_64_insns(struct stack_slot *from, struct stack_slot *to, + struct list_head *add_before, unsigned long bc_offset) +{ + return insert_copy_slot_32_insns(from, to, add_before, bc_offset); +} + +#endif /* CONFIG_X86_32 */ diff --git a/arch/x86/stack-frame.c b/arch/x86/stack-frame.c index 4eba71f..d8440ce 100644 --- a/arch/x86/stack-frame.c +++ b/arch/x86/stack-frame.c @@ -68,12 +68,12 @@ static unsigned long __index_to_offset(unsigned long index) } static unsigned long -index_to_offset(unsigned long idx, unsigned long nr_args) +index_to_offset(unsigned long idx, int size, unsigned long nr_args) { if (idx < nr_args) return ARGS_START_OFFSET + __index_to_offset(idx); - return 0UL - __index_to_offset(idx - nr_args + 1); + return 0UL - __index_to_offset(idx - nr_args + size); } unsigned long frame_local_offset(struct vm_method *method, @@ -84,14 +84,21 @@ unsigned long frame_local_offset(struct vm_method *method, idx = local->local_index; nr_args = method->args_count; - return index_to_offset(idx, 
nr_args); + return index_to_offset(idx, vm_type_slot_size(local->vm_type), nr_args); } unsigned long slot_offset(struct stack_slot *slot) { struct stack_frame *frame = slot->parent; - return index_to_offset(slot->index, frame->nr_args); + return index_to_offset(slot->index, 1, frame->nr_args); +} + +unsigned long slot_offset_64(struct stack_slot *slot) +{ + struct stack_frame *frame = slot->parent; + + return index_to_offset(slot->index, 2, frame->nr_args); } unsigned long frame_locals_size(struct stack_frame *frame) diff --git a/include/jit/stack-slot.h b/include/jit/stack-slot.h index b0bb16d..cd865c9 100644 --- a/include/jit/stack-slot.h +++ b/include/jit/stack-slot.h @@ -1,6 +1,8 @@ #ifndef __JIT_STACK_SLOT_H #define __JIT_STACK_SLOT_H +#include "vm/types.h" + struct stack_frame; struct stack_slot { @@ -42,5 +44,7 @@ void free_stack_frame(struct stack_frame *frame); struct stack_slot *get_local_slot(struct stack_frame *frame, unsigned long index); struct stack_slot *get_spill_slot_32(struct stack_frame *frame); struct stack_slot *get_spill_slot_64(struct stack_frame *frame); +struct stack_slot *get_spill_slot(struct stack_frame *frame, enum vm_type type); +struct stack_slot *get_next_slot(struct stack_slot *slot); #endif /* __JIT_STACK_SLOT_H */ diff --git a/include/vm/types.h b/include/vm/types.h index 368729d..ae32f69 100644 --- a/include/vm/types.h +++ b/include/vm/types.h @@ -44,4 +44,11 @@ static inline bool vm_type_is_float(enum vm_type type) return type == J_FLOAT || type == J_DOUBLE; } +static inline int vm_type_slot_size(enum vm_type type) +{ + if (type == J_DOUBLE || type == J_LONG) + return 2; + return 1; +} + #endif diff --git a/jit/spill-reload.c b/jit/spill-reload.c index 70a5aa9..3dfb0b3 100644 --- a/jit/spill-reload.c +++ b/jit/spill-reload.c @@ -137,7 +137,7 @@ spill_interval(struct live_interval *interval, struct stack_slot *slot; struct insn *spill; - slot = get_spill_slot_32(cu->stack_frame); + slot = get_spill_slot(cu->stack_frame, 
interval->spill_reload_reg.vm_type); if (!slot) return NULL; @@ -185,33 +185,27 @@ static int insert_reload_insn(struct live_interval *interval, return 0; } -static int -insert_copy_slot_insn(struct live_interval *interval, - struct compilation_unit *cu, - struct stack_slot *from, - struct stack_slot *to, - struct list_head *push_before, - unsigned long bc_offset) +static int insert_copy_slot_insn(struct stack_slot *from, + struct stack_slot *to, + enum vm_type vm_type, + struct list_head *push_before, + unsigned long bc_offset) { - struct insn *push, *pop; + int slot_size; + int err; - push = push_slot_insn(from); - if (!push) - return warn("out of memory"), -ENOMEM; - push->bytecode_offset = bc_offset; + slot_size = vm_type_slot_size(vm_type); + assert(slot_size == 1 || slot_size == 2); - pop = pop_slot_insn(to); - if (!pop) { - free_insn(push); - return warn("out of memory"), -ENOMEM; - } - pop->bytecode_offset = bc_offset; + if (slot_size == 1) + err = insert_copy_slot_32_insns(from, to, push_before, bc_offset); + else + err = insert_copy_slot_64_insns(from, to, push_before, bc_offset); - list_add_tail(&push->insn_list_node, push_before); - list_add(&pop->insn_list_node, &push->insn_list_node); + if (err) + return warn("out of memory"), -ENOMEM; return 0; - } static int __insert_spill_reload_insn(struct live_interval *interval, struct compilation_unit *cu) @@ -283,8 +277,8 @@ static void insert_mov_insns(struct compilation_unit *cu, to_it = mappings[i].to; if (to_it->need_reload && interval_start(to_it) >= to_bb->start_insn) { - insert_copy_slot_insn(mappings[i].to, cu, slots[i], - to_it->spill_parent->spill_slot, + insert_copy_slot_insn(slots[i], to_it->spill_parent->spill_slot, + to_it->var_info->vm_type, push_before, bc_offset); continue; } diff --git a/jit/stack-slot.c b/jit/stack-slot.c index 87f8fde..882cb20 100644 --- a/jit/stack-slot.c +++ b/jit/stack-slot.c @@ -102,3 +102,13 @@ struct stack_slot *get_spill_slot_64(struct stack_frame *frame) { return 
__get_spill_slot(frame, 2); } + +struct stack_slot *get_spill_slot(struct stack_frame *frame, enum vm_type type) +{ + return __get_spill_slot(frame, vm_type_slot_size(type)); +} + +struct stack_slot *get_next_slot(struct stack_slot *slot) +{ + return get_local_slot(slot->parent, slot->index + 1); +} -- 1.6.3.3 ------------------------------------------------------------------------------ Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day trial. Simplify your report design, integration and deployment - and focus on what you do best, core application coding. Discover what's new with Crystal Reports now. http://p.sf.net/sfu/bobj-july _______________________________________________ Jatovm-devel mailing list Jatovm-devel@lists.sourceforge.net https://lists.sourceforge.net/lists/listinfo/jatovm-devel