Amazingly enough, my patch worked well enough that my NetBSD VAX kernel built with GCC 5.3 is no longer crashing. I feel pretty good about what I have so far, so here's the complete diff for both the C++ exception fix and the bad-condition-codes optimizer bug. It should be good enough to check into NetBSD current, at least, and I believe it fixes most, if not all, of the bad code generation bugs with optimization on VAX.
-Jake Index: gcc/except.c =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/except.c,v retrieving revision 1.3 diff -u -r1.3 except.c --- gcc/except.c 23 Mar 2016 15:51:36 -0000 1.3 +++ gcc/except.c 28 Mar 2016 23:24:40 -0000 @@ -2288,7 +2288,8 @@ #endif { #ifdef EH_RETURN_HANDLER_RTX - emit_move_insn (EH_RETURN_HANDLER_RTX, crtl->eh.ehr_handler); + rtx insn = emit_move_insn (EH_RETURN_HANDLER_RTX, crtl->eh.ehr_handler); + RTX_FRAME_RELATED_P (insn) = 1; // XXX FIXME in VAX backend? #else error ("__builtin_eh_return not supported on this target"); #endif Index: gcc/config/vax/builtins.md =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/builtins.md,v retrieving revision 1.5 diff -u -r1.5 builtins.md --- gcc/config/vax/builtins.md 24 Jan 2016 09:43:34 -0000 1.5 +++ gcc/config/vax/builtins.md 28 Mar 2016 23:24:40 -0000 @@ -50,7 +50,8 @@ (ctz:SI (match_operand:SI 1 "general_operand" "nrmT"))) (set (cc0) (match_dup 0))] "" - "ffs $0,$32,%1,%0") + "ffs $0,$32,%1,%0" + [(set_attr "cc" "clobber")]) (define_expand "sync_lock_test_and_set<mode>" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=&g") @@ -88,7 +89,8 @@ (match_dup 1)) (const_int 1))])] "" - "jbssi %1,%0,%l2") + "jbssi %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "jbbssihi" [(parallel @@ -105,7 +107,8 @@ (match_dup 1)) (const_int 1))])] "" - "jbssi %1,%0,%l2") + "jbssi %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "jbbssisi" [(parallel @@ -122,8 +125,8 @@ (match_dup 1)) (const_int 1))])] "" - "jbssi %1,%0,%l2") - + "jbssi %1,%0,%l2" + [(set_attr "cc" "none")]) (define_expand "sync_lock_release<mode>" [(set (match_operand:VAXint 0 "memory_operand" "+m") @@ -160,7 +163,8 @@ (match_dup 1)) (const_int 0))])] "" - "jbcci %1,%0,%l2") + "jbcci %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "jbbccihi" [(parallel @@ -177,7 +181,8 @@ (match_dup 1)) 
(const_int 0))])] "" - "jbcci %1,%0,%l2") + "jbcci %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "jbbccisi" [(parallel @@ -194,4 +199,5 @@ (match_dup 1)) (const_int 0))])] "" - "jbcci %1,%0,%l2") + "jbcci %1,%0,%l2" + [(set_attr "cc" "none")]) Index: gcc/config/vax/elf.h =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/elf.h,v retrieving revision 1.6 diff -u -r1.6 elf.h --- gcc/config/vax/elf.h 23 Mar 2016 15:51:37 -0000 1.6 +++ gcc/config/vax/elf.h 28 Mar 2016 23:24:40 -0000 @@ -26,7 +26,7 @@ #define REGISTER_PREFIX "%" #define REGISTER_NAMES \ { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", \ - "%r8", "%r9", "%r10", "%r11", "%ap", "%fp", "%sp", "%pc", } + "%r8", "%r9", "%r10", "%r11", "%ap", "%fp", "%sp", "%pc", "%psw", } #undef SIZE_TYPE #define SIZE_TYPE "long unsigned int" @@ -45,18 +45,8 @@ count pushed by the CALLS and before the start of the saved registers. */ #define INCOMING_FRAME_SP_OFFSET 0 -/* Offset from the frame pointer register value to the top of the stack. */ -#define FRAME_POINTER_CFA_OFFSET(FNDECL) 0 - -/* We use R2-R5 (call-clobbered) registers for exceptions. */ -#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 2 : INVALID_REGNUM) - -/* Place the top of the stack for the DWARF2 EH stackadj value. */ -#define EH_RETURN_STACKADJ_RTX \ - gen_rtx_MEM (SImode, \ - plus_constant (Pmode, \ - gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\ - -4)) +/* We use R2-R3 (call-clobbered) registers for exceptions. */ +#define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) + 2 : INVALID_REGNUM) /* Simple store the return handler into the call frame. */ #define EH_RETURN_HANDLER_RTX \ @@ -66,10 +56,6 @@ 16)) -/* Reserve the top of the stack for exception handler stackadj value. */ -#undef STARTING_FRAME_OFFSET -#define STARTING_FRAME_OFFSET -4 - /* The VAX wants no space between the case instruction and the jump table. 
*/ #undef ASM_OUTPUT_BEFORE_CASE_LABEL #define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) Index: gcc/config/vax/vax-protos.h =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/vax-protos.h,v retrieving revision 1.5 diff -u -r1.5 vax-protos.h --- gcc/config/vax/vax-protos.h 23 Mar 2016 21:09:04 -0000 1.5 +++ gcc/config/vax/vax-protos.h 28 Mar 2016 23:24:40 -0000 @@ -28,9 +28,9 @@ extern const char *rev_cond_name (rtx); extern void print_operand_address (FILE *, rtx); extern void print_operand (FILE *, rtx, int); -extern void vax_notice_update_cc (rtx, rtx); +extern void vax_notice_update_cc (rtx, rtx_insn *insn); extern void vax_expand_addsub_di_operands (rtx *, enum rtx_code); -extern bool vax_decomposed_dimode_operand_p (rtx, rtx); +/* extern bool vax_decomposed_dimode_operand_p (rtx, rtx); */ extern const char * vax_output_int_move (rtx, rtx *, machine_mode); extern const char * vax_output_int_add (rtx, rtx *, machine_mode); extern const char * vax_output_int_subtract (rtx, rtx *, machine_mode); Index: gcc/config/vax/vax.c =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/vax.c,v retrieving revision 1.15 diff -u -r1.15 vax.c --- gcc/config/vax/vax.c 24 Mar 2016 04:27:29 -0000 1.15 +++ gcc/config/vax/vax.c 28 Mar 2016 23:24:40 -0000 @@ -1,4 +1,4 @@ -/* Subroutines for insn-output.c for VAX. +/* Subroutines used for code generation on VAX. Copyright (C) 1987-2015 Free Software Foundation, Inc. This file is part of GCC. @@ -191,15 +191,24 @@ vax_expand_prologue (void) { int regno, offset; - int mask = 0; + unsigned int mask = 0; HOST_WIDE_INT size; rtx insn; - offset = 20; - for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) - if (df_regs_ever_live_p (regno) && !call_used_regs[regno]) + offset = 20; // starting offset + + /* We only care about r2 to r11 here. AP, FP, and SP are saved by CALLS. 
+ Always save r2 and r3 when eh_return is called, to reserve space for + the stack unwinder to update them in the stack frame on exceptions. + r0 and r1 are always available for function return values and are + also used by C++. */ + + unsigned int testbit = (1 << 2); + for (regno = 2; regno < VAX_AP_REGNUM; regno++, testbit <<= 1) + if ((df_regs_ever_live_p (regno) && !call_used_regs[regno]) + || (crtl->calls_eh_return /* && regno >= 2 */ && regno < 4)) { - mask |= 1 << regno; + mask |= testbit; offset += 4; } @@ -228,20 +237,16 @@ insn = emit_insn (gen_blockage ()); RTX_FRAME_RELATED_P (insn) = 1; -#ifdef notyet - /* - * We can't do this, the dwarf code asserts and we don't have yet a - * way to get to the psw - */ vax_add_reg_cfa_offset (insn, 4, gen_rtx_REG (Pmode, PSW_REGNUM)); -#endif vax_add_reg_cfa_offset (insn, 8, arg_pointer_rtx); vax_add_reg_cfa_offset (insn, 12, frame_pointer_rtx); vax_add_reg_cfa_offset (insn, 16, pc_rtx); - offset = 20; - for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) - if (mask & (1 << regno)) + offset = 20; // reset to starting value. + + testbit = (1 << 2); // reset to starting bit for r2. + for (regno = 2; regno < VAX_AP_REGNUM; regno++, testbit <<= 1) + if (mask & testbit) { vax_add_reg_cfa_offset (insn, offset, gen_rtx_REG (SImode, regno)); offset += 4; @@ -1131,61 +1136,51 @@ /* Worker function for NOTICE_UPDATE_CC. */ void -vax_notice_update_cc (rtx exp, rtx insn ATTRIBUTE_UNUSED) +vax_notice_update_cc (rtx exp, rtx_insn *insn) { + attr_cc cc = get_attr_cc (insn); + if (GET_CODE (exp) == SET) { if (GET_CODE (SET_SRC (exp)) == CALL) CC_STATUS_INIT; - else if (GET_CODE (SET_DEST (exp)) != ZERO_EXTRACT - && GET_CODE (SET_DEST (exp)) != PC) + else if (GET_CODE (SET_DEST (exp)) != PC + && cc == CC_COMPARE) { - cc_status.flags = 0; - /* The integer operations below don't set carry or + /* Some of the integer operations don't set carry or set it in an incompatible way. 
That's ok though as the Z bit is all we need when doing unsigned comparisons on the result of these insns (since they're always with 0). Set CC_NO_OVERFLOW to generate the correct unsigned branches. */ - switch (GET_CODE (SET_SRC (exp))) - { - case NEG: - if (GET_MODE_CLASS (GET_MODE (exp)) == MODE_FLOAT) - break; - case AND: - case IOR: - case XOR: - case NOT: - case CTZ: - case MEM: - case REG: - cc_status.flags = CC_NO_OVERFLOW; - break; - default: - break; - } + cc_status.flags = CC_NO_OVERFLOW; cc_status.value1 = SET_DEST (exp); cc_status.value2 = SET_SRC (exp); } + else if (cc != CC_NONE) + CC_STATUS_INIT; } else if (GET_CODE (exp) == PARALLEL && GET_CODE (XVECEXP (exp, 0, 0)) == SET) { - if (GET_CODE (SET_SRC (XVECEXP (exp, 0, 0))) == CALL) + rtx exp0 = XVECEXP (exp, 0, 0); + if (GET_CODE (SET_SRC (exp0)) == CALL) CC_STATUS_INIT; - else if (GET_CODE (SET_DEST (XVECEXP (exp, 0, 0))) != PC) + else if (GET_CODE (SET_DEST (exp0)) != PC + && cc == CC_COMPARE) { - cc_status.flags = 0; - cc_status.value1 = SET_DEST (XVECEXP (exp, 0, 0)); - cc_status.value2 = SET_SRC (XVECEXP (exp, 0, 0)); + cc_status.flags = CC_NO_OVERFLOW; + cc_status.value1 = SET_DEST (exp0); + cc_status.value2 = SET_SRC (exp0); } - else + else if (cc != CC_NONE) /* PARALLELs whose first element sets the PC are aob, sob insns. They do change the cc's. */ CC_STATUS_INIT; } - else + else if (cc != CC_NONE) CC_STATUS_INIT; + if (cc_status.value1 && REG_P (cc_status.value1) && cc_status.value2 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2)) @@ -1909,12 +1904,20 @@ return true; if (indirectable_address_p (x, strict, false)) return true; - xfoo0 = XEXP (x, 0); - if (MEM_P (x) && indirectable_address_p (xfoo0, strict, true)) - return true; - if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC) - && BASE_REGISTER_P (xfoo0, strict)) - return true; + /* Note: avoid calling XEXP until needed. It may not be a valid type. + This fixes an assertion failure when RTX checking is enabled. 
*/ + if (MEM_P (x)) + { + xfoo0 = XEXP (x, 0); + if (indirectable_address_p (xfoo0, strict, true)) + return true; + } + if (GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC) + { + xfoo0 = XEXP (x, 0); + if (BASE_REGISTER_P (xfoo0, strict)) + return true; + } return false; } @@ -2366,6 +2369,9 @@ : (int_size_in_bytes (type) + 3) & ~3); } +#if 0 +/* This is commented out because the only usage of it was the buggy + 32-to-64-bit peephole optimizations that have been commented out. */ bool vax_decomposed_dimode_operand_p (rtx lo, rtx hi) { @@ -2416,3 +2422,4 @@ return rtx_equal_p(lo, hi) && lo_offset + 4 == hi_offset; } +#endif Index: gcc/config/vax/vax.h =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/vax.h,v retrieving revision 1.7 diff -u -r1.7 vax.h --- gcc/config/vax/vax.h 23 Mar 2016 15:51:37 -0000 1.7 +++ gcc/config/vax/vax.h 28 Mar 2016 23:24:40 -0000 @@ -120,13 +120,17 @@ The hardware registers are assigned numbers for the compiler from 0 to just below FIRST_PSEUDO_REGISTER. All registers that the compiler knows about must be given numbers, - even those that are not normally considered general registers. */ -#define FIRST_PSEUDO_REGISTER 16 + even those that are not normally considered general registers. + This includes PSW, which the VAX backend did not originally include. */ +#define FIRST_PSEUDO_REGISTER 17 + +/* For compatibility, DWARF_FRAME_REGISTERS must still be 16. */ +#define DWARF_FRAME_REGISTERS 16 /* 1 for registers that have pervasive standard uses and are not available for the register allocator. - On the VAX, these are the AP, FP, SP and PC. */ -#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1} + On the VAX, these are the AP, FP, SP, PC, and PSW. */ +#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} /* 1 for registers not available across function calls. 
These must include the FIXED_REGISTERS and also any @@ -134,7 +138,7 @@ The latter must include the registers where values are returned and the register where structure-value addresses are passed. Aside from that, you can include as many other registers as you like. */ -#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1} +#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} /* Return number of consecutive hard regs needed starting at reg REGNO to hold something of mode MODE. @@ -169,12 +173,12 @@ /* Base register for access to local variables of the function. */ #define FRAME_POINTER_REGNUM VAX_FP_REGNUM -/* Offset from the frame pointer register value to the top of stack. */ -#define FRAME_POINTER_CFA_OFFSET(FNDECL) 0 - /* Base register for access to arguments of the function. */ #define ARG_POINTER_REGNUM VAX_AP_REGNUM +/* Offset from the argument pointer register value to the CFA. */ +#define ARG_POINTER_CFA_OFFSET(FNDECL) 0 + /* Register in which static-chain is passed to a function. */ #define STATIC_CHAIN_REGNUM 0 @@ -395,9 +399,9 @@ allocation. */ #define REGNO_OK_FOR_INDEX_P(regno) \ - ((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0) + ((regno) <= VAX_PC_REGNUM || reg_renumber[regno] >= 0) #define REGNO_OK_FOR_BASE_P(regno) \ - ((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0) + ((regno) <= VAX_PC_REGNUM || reg_renumber[regno] >= 0) /* Maximum number of registers that can appear in a valid memory address. */ @@ -424,11 +428,11 @@ /* Nonzero if X is a hard reg that can be used as an index or if it is a pseudo reg. */ -#define REG_OK_FOR_INDEX_P(X) 1 +#define REG_OK_FOR_INDEX_P(X) (REGNO (X) != VAX_PSW_REGNUM) /* Nonzero if X is a hard reg that can be used as a base reg or if it is a pseudo reg. 
*/ -#define REG_OK_FOR_BASE_P(X) 1 +#define REG_OK_FOR_BASE_P(X) (REGNO (X) != VAX_PSW_REGNUM) #else @@ -508,12 +512,6 @@ #define NOTICE_UPDATE_CC(EXP, INSN) \ vax_notice_update_cc ((EXP), (INSN)) - -#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \ - { if (cc_status.flags & CC_NO_OVERFLOW) \ - return NO_OV; \ - return NORMAL; \ - } /* Control the assembler format that we output. */ @@ -548,7 +546,7 @@ #define REGISTER_PREFIX "" #define REGISTER_NAMES \ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ - "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc", } + "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc", "psw", } /* This is BSD, so it wants DBX format. */ Index: gcc/config/vax/vax.md =================================================================== RCS file: /cvsroot/src/external/gpl3/gcc/dist/gcc/config/vax/vax.md,v retrieving revision 1.11 diff -u -r1.11 vax.md --- gcc/config/vax/vax.md 23 Mar 2016 15:51:37 -0000 1.11 +++ gcc/config/vax/vax.md 28 Mar 2016 23:24:40 -0000 @@ -62,6 +62,24 @@ (include "constraints.md") (include "predicates.md") +;; Condition code attribute. This is used to track the meaning of flags +;; after an insn executes. Often, explicit compare insns can be deleted +;; if a conditional branch can be generated to use an existing CC value. +;; This is for the CC0 mechanism, also used by the m68k and avr backends. +;; +;; On VAX, the default "cc" is "clobber". This means that the Z and N +;; flags cannot be used to compare operand 0 to numeric 0 later. If the +;; "cc" is "compare" then the Z and N flags can be used in this way. +;; Unsigned integer comparisons are handled correctly by always setting +;; CC_NO_OVERFLOW in vax_notice_update_cc, so that the C flag is ignored, +;; and unsigned comparisons only use equality/inequality testing with Z. +;; A "cc" of "none" means that Z and N will never be modified by this insn. + +(define_attr "cc" "none,compare,clobber" (const_string "clobber")) + +;; Comparison instructions. 
tst and cmp set the flags based on (%0 < %1), +;; so the "cc" attr is set to "clobber", unless operand 1 is 0. + (define_insn "*cmp<mode>" [(set (cc0) (compare (match_operand:VAXint 0 "nonimmediate_operand" "nrmT,nrmT") @@ -69,7 +87,8 @@ "" "@ tst<VAXint:isfx> %0 - cmp<VAXint:isfx> %0,%1") + cmp<VAXint:isfx> %0,%1" + [(set_attr "cc" "compare,clobber")]) (define_insn "*cmp<mode>" [(set (cc0) @@ -78,7 +97,12 @@ "" "@ tst<VAXfp:fsfx> %0 - cmp<VAXfp:fsfx> %0,%1") + cmp<VAXfp:fsfx> %0,%1" + [(set_attr "cc" "compare,clobber")]) + +;; The bit instruction has different behavior from the last two insns, +;; performing a logical and on %0 and %1, then comparing the result to 0. +;; We can't make any assumptions about operand 0 itself from this test. (define_insn "*bit<mode>" [(set (cc0) @@ -86,7 +110,8 @@ (match_operand:VAXint 1 "general_operand" "nrmT")) (const_int 0)))] "" - "bit<VAXint:isfx> %0,%1") + "bit<VAXint:isfx> %0,%1" + [(set_attr "cc" "clobber")]) ;; The VAX has no sCOND insns. It does have add/subtract with carry ;; which could be used to implement the sltu and sgeu patterns. However, @@ -96,26 +121,27 @@ ;; and has been deleted. +;; The mov instruction sets Z and N flags by comparing operand 0 to 0. +;; The V flag is cleared to 0, and C is unchanged. clr has the same +;; semantics, but N is always cleared and Z is always set (0 == 0). (define_insn "mov<mode>" [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g") (match_operand:VAXfp 1 "general_operand" "G,gF"))] "" "@ clr<VAXfp:fsfx> %0 - mov<VAXfp:fsfx> %1,%0") - -;; Some VAXen don't support this instruction. 
-;;(define_insn "movti" -;; [(set (match_operand:TI 0 "general_operand" "=g") -;; (match_operand:TI 1 "general_operand" "g"))] -;; "" -;; "movh %1,%0") + mov<VAXfp:fsfx> %1,%0" + [(set_attr "cc" "compare")]) +;; The variety of possible returns from vax_output_int_move() is such that +;; it's best to consider the flags to be clobbered, rather than to expect +;; any particular compare-to-zero behavior from the sequence. (define_insn "movdi" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") (match_operand:DI 1 "general_operand" "g"))] "" - "* return vax_output_int_move (insn, operands, DImode);") + "* return vax_output_int_move (insn, operands, DImode);" + [(set_attr "cc" "clobber")]) ;; The VAX move instructions have space-time tradeoffs. On a MicroVAX ;; register-register mov instructions take 3 bytes and 2 CPU cycles. clrl @@ -138,8 +164,7 @@ [(set (match_operand:SI 0 "nonimmediate_operand" "") (match_operand:SI 1 "general_operand" ""))] "" - " -{ + "{ #ifdef NO_EXTERNAL_INDIRECT_ADDRESS if (flag_pic && GET_CODE (operands[1]) == CONST @@ -154,24 +179,35 @@ DONE; } #endif -}") + }") (define_insn "movsi_2" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (match_operand:SI 1 "nonsymbolic_operand" "nrmT"))] "" - "* return vax_output_int_move (insn, operands, SImode);") + "* return vax_output_int_move (insn, operands, SImode);" + [(set_attr "cc" "clobber")]) (define_insn "mov<mode>" [(set (match_operand:VAXintQH 0 "nonimmediate_operand" "=g") (match_operand:VAXintQH 1 "general_operand" "g"))] "" - "* return vax_output_int_move (insn, operands, <MODE>mode);") + "* return vax_output_int_move (insn, operands, <MODE>mode);" + [(set_attr "cc" "clobber")]) + +;; These cases all set Z and N to comparison with zero. +;; The case of a constant from 64 to 255 is handled below. 
+(define_expand "movstricthi" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+g")) + (match_operand:HI 1 "general_operand" "g"))]) -(define_insn "movstricthi" +(define_insn "*movstricthi" [(set (strict_low_part (match_operand:HI 0 "register_operand" "+g")) (match_operand:HI 1 "general_operand" "g"))] - "" + "!CONST_INT_P (operands[1]) + || ((unsigned int)INTVAL (operands[1]) < 64) + || ((unsigned int)~INTVAL (operands[1]) < 64) + || ((unsigned int)INTVAL (operands[1]) > 255)" "* { if (CONST_INT_P (operands[1])) @@ -184,11 +220,22 @@ else if ((unsigned int)~i < 64) return \"mcomw %H1,%0\"; else if ((unsigned int)i < 256) - return \"movzbw %1,%0\"; + gcc_unreachable (); + /* return \"movzbw %1,%0\"; */ } return \"movw %1,%0\"; -}") +}" + [(set_attr "cc" "compare")]) +;; This variant is different because it only sets the Z flag. +(define_insn "*movstricthi_2" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+g")) + (match_operand:HI 1 "general_operand" "g"))] + "CONST_INT_P (operands[1]) && ((unsigned int)INTVAL (operands[1]) < 256)" + "movzbw %1,%0" + [(set_attr "cc" "clobber")]) + +;; This insn is missing the "movzbw" variant of the previous one. (define_insn "movstrictqi" [(set (strict_low_part (match_operand:QI 0 "register_operand" "+g")) (match_operand:QI 1 "general_operand" "g"))] @@ -204,7 +251,8 @@ return \"mcomb %B1,%0\"; } return \"movb %1,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;; This is here to accept 4 arguments and pass the first 3 along ;; to the movmemhi1 pattern that really does the work. 
@@ -235,7 +283,8 @@ (match_operand:BLK 1 "memory_operand" "B")) (use (match_operand:SI 2 "const_int_operand" "g"))] "INTVAL (operands[2]) <= 48" - "* return vax_output_movmemsi (insn, operands);") + "* return vax_output_movmemsi (insn, operands);" + [(set_attr "cc" "clobber")]) (define_insn "movmemhi1" [(set (match_operand:BLK 0 "memory_operand" "=o") @@ -248,75 +297,94 @@ (clobber (reg:SI 4)) (clobber (reg:SI 5))] "" - "movc3 %2,%1,%0") + "movc3 %2,%1,%0" + [(set_attr "cc" "clobber")]) ;; Extension and truncation insns. +;; The cvt instructions update all four flags. Z and N can be used for +;; equality to 0 and for signed integer comparison to 0. C is set to 0. +;; The V flag indicates an overflow on truncation to a smaller type. + (define_insn "truncsiqi2" [(set (match_operand:QI 0 "nonimmediate_operand" "=g") (truncate:QI (match_operand:SI 1 "nonimmediate_operand" "nrmT")))] "" - "cvtlb %1,%0") + "cvtlb %1,%0" + [(set_attr "cc" "compare")]) (define_insn "truncsihi2" [(set (match_operand:HI 0 "nonimmediate_operand" "=g") (truncate:HI (match_operand:SI 1 "nonimmediate_operand" "nrmT")))] "" - "cvtlw %1,%0") + "cvtlw %1,%0" + [(set_attr "cc" "compare")]) (define_insn "trunchiqi2" [(set (match_operand:QI 0 "nonimmediate_operand" "=g") (truncate:QI (match_operand:HI 1 "nonimmediate_operand" "g")))] "" - "cvtwb %1,%0") + "cvtwb %1,%0" + [(set_attr "cc" "compare")]) (define_insn "extendhisi2" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))] "" - "cvtwl %1,%0") + "cvtwl %1,%0" + [(set_attr "cc" "compare")]) (define_insn "extendqihi2" [(set (match_operand:HI 0 "nonimmediate_operand" "=g") (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))] "" - "cvtbw %1,%0") + "cvtbw %1,%0" + [(set_attr "cc" "compare")]) (define_insn "extendqisi2" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))] "" - "cvtbl %1,%0") + "cvtbl 
%1,%0" + [(set_attr "cc" "compare")]) (define_insn "extendsfdf2" [(set (match_operand:DF 0 "nonimmediate_operand" "=g") (float_extend:DF (match_operand:SF 1 "general_operand" "gF")))] "" - "cvtf%# %1,%0") + "cvtf%# %1,%0" + [(set_attr "cc" "compare")]) (define_insn "truncdfsf2" [(set (match_operand:SF 0 "nonimmediate_operand" "=g") (float_truncate:SF (match_operand:DF 1 "general_operand" "gF")))] "" - "cvt%#f %1,%0") + "cvt%#f %1,%0" + [(set_attr "cc" "compare")]) + +;; The movzbw, movzbl, movzwl instructions set the Z flag for (%0 == 0). +;; N and V are cleared and C is not modified. Only Z is useful to test. (define_insn "zero_extendhisi2" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))] "" - "movzwl %1,%0") + "movzwl %1,%0" + [(set_attr "cc" "clobber")]) (define_insn "zero_extendqihi2" [(set (match_operand:HI 0 "nonimmediate_operand" "=g") (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))] "" - "movzbw %1,%0") + "movzbw %1,%0" + [(set_attr "cc" "clobber")]) (define_insn "zero_extendqisi2" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))] "" - "movzbl %1,%0") + "movzbl %1,%0" + [(set_attr "cc" "clobber")]) ;; Fix-to-float conversion insns. @@ -324,7 +392,8 @@ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g") (float:VAXfp (match_operand:VAXint 1 "nonimmediate_operand" "g")))] "" - "cvt<VAXint:isfx><VAXfp:fsfx> %1,%0") + "cvt<VAXint:isfx><VAXfp:fsfx> %1,%0" + [(set_attr "cc" "compare")]) ;; Float-to-fix conversion insns. 
@@ -332,12 +401,12 @@ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g") (fix:VAXint (match_operand:VAXfp 1 "general_operand" "gF")))] "" - "cvt<VAXfp:fsfx><VAXint:isfx> %1,%0") + "cvt<VAXfp:fsfx><VAXint:isfx> %1,%0" + [(set_attr "cc" "compare")]) (define_expand "fixuns_trunc<VAXfp:mode><VAXint:mode>2" [(set (match_operand:VAXint 0 "nonimmediate_operand" "") - (fix:VAXint (match_operand:VAXfp 1 "general_operand")))] - "") + (fix:VAXint (match_operand:VAXfp 1 "general_operand")))]) ;;- All kinds of add instructions. @@ -349,42 +418,48 @@ "@ add<VAXfp:fsfx>2 %2,%0 add<VAXfp:fsfx>2 %1,%0 - add<VAXfp:fsfx>3 %1,%2,%0") + add<VAXfp:fsfx>3 %1,%2,%0" + [(set_attr "cc" "compare")]) (define_insn "pushlclsymreg" [(set (match_operand:SI 0 "push_operand" "=g") (plus:SI (match_operand:SI 1 "register_operand" "%r") (match_operand:SI 2 "local_symbolic_operand" "i")))] "flag_pic" - "pushab %a2[%1]") + "pushab %a2[%1]" + [(set_attr "cc" "compare")]) (define_insn "pushextsymreg" [(set (match_operand:SI 0 "push_operand" "=g") (plus:SI (match_operand:SI 1 "register_operand" "%r") (match_operand:SI 2 "external_symbolic_operand" "i")))] "flag_pic" - "pushab %a2[%1]") + "pushab %a2[%1]" + [(set_attr "cc" "compare")]) (define_insn "movlclsymreg" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (plus:SI (match_operand:SI 1 "register_operand" "%r") (match_operand:SI 2 "local_symbolic_operand" "i")))] "flag_pic" - "movab %a2[%1],%0") + "movab %a2[%1],%0" + [(set_attr "cc" "compare")]) (define_insn "movextsymreg" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (plus:SI (match_operand:SI 1 "register_operand" "%r") (match_operand:SI 2 "external_symbolic_operand" "i")))] "flag_pic" - "movab %a2[%1],%0") + "movab %a2[%1],%0" + [(set_attr "cc" "compare")]) (define_insn "add<mode>3" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g") (plus:VAXint (match_operand:VAXint 1 "general_operand" "nrmT") (match_operand:VAXint 2 "general_operand" "nrmT")))] "" - "* return 
vax_output_int_add (insn, operands, <MODE>mode);") + "* return vax_output_int_add (insn, operands, <MODE>mode);" + [(set_attr "cc" "clobber")]) (define_expand "adddi3" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") @@ -398,7 +473,8 @@ (plus:DI (match_operand:DI 1 "general_addsub_di_operand" "%0") (match_operand:DI 2 "general_addsub_di_operand" "nRr")))] "TARGET_QMATH" - "* return vax_output_int_add (insn, operands, DImode);") + "* return vax_output_int_add (insn, operands, DImode);" + [(set_attr "cc" "clobber")]) ;; The add-with-carry (adwc) instruction only accepts two operands. (define_insn "adddi3_old" @@ -406,7 +482,8 @@ (plus:DI (match_operand:DI 1 "general_operand" "%0,ro>") (match_operand:DI 2 "general_operand" "Fsro,Fs")))] "!TARGET_QMATH" - "* return vax_output_int_add (insn, operands, DImode);") + "* return vax_output_int_add (insn, operands, DImode);" + [(set_attr "cc" "clobber")]) ;;- All kinds of subtract instructions. @@ -417,7 +494,8 @@ "" "@ sub<VAXfp:fsfx>2 %2,%0 - sub<VAXfp:fsfx>3 %2,%1,%0") + sub<VAXfp:fsfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "sub<mode>3" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g") @@ -426,7 +504,8 @@ "" "@ sub<VAXint:isfx>2 %2,%0 - sub<VAXint:isfx>3 %2,%1,%0") + sub<VAXint:isfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) (define_expand "subdi3" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") @@ -440,7 +519,8 @@ (minus:DI (match_operand:DI 1 "general_addsub_di_operand" "0,I") (match_operand:DI 2 "general_addsub_di_operand" "nRr,Rr")))] "TARGET_QMATH" - "* return vax_output_int_subtract (insn, operands, DImode);") + "* return vax_output_int_subtract (insn, operands, DImode);" + [(set_attr "cc" "clobber")]) ;; The subtract-with-carry (sbwc) instruction only takes two operands. 
(define_insn "subdi3_old" @@ -448,7 +528,8 @@ (minus:DI (match_operand:DI 1 "general_operand" "0,or>") (match_operand:DI 2 "general_operand" "Fsor,Fs")))] "!TARGET_QMATH" - "* return vax_output_int_subtract (insn, operands, DImode);") + "* return vax_output_int_subtract (insn, operands, DImode);" + [(set_attr "cc" "clobber")]) ;;- Multiply instructions. @@ -460,7 +541,8 @@ "@ mul<VAXfp:fsfx>2 %2,%0 mul<VAXfp:fsfx>2 %1,%0 - mul<VAXfp:fsfx>3 %1,%2,%0") + mul<VAXfp:fsfx>3 %1,%2,%0" + [(set_attr "cc" "compare")]) (define_insn "mul<mode>3" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g") @@ -470,7 +552,8 @@ "@ mul<VAXint:isfx>2 %2,%0 mul<VAXint:isfx>2 %1,%0 - mul<VAXint:isfx>3 %1,%2,%0") + mul<VAXint:isfx>3 %1,%2,%0" + [(set_attr "cc" "compare")]) (define_insn "mulsidi3" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") @@ -479,7 +562,8 @@ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "nrmT"))))] "" - "emul %1,%2,$0,%0") + "emul %1,%2,$0,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") @@ -490,7 +574,8 @@ (match_operand:SI 2 "nonimmediate_operand" "nrmT"))) (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "g"))))] "" - "emul %1,%2,%3,%0") + "emul %1,%2,%3,%0" + [(set_attr "cc" "compare")]) ;; 'F' constraint means type CONST_DOUBLE (define_insn "" @@ -508,7 +593,8 @@ if (CONST_DOUBLE_HIGH (operands[3])) operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[3])); return \"emul %1,%2,%3,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;;- Divide instructions. 
@@ -519,7 +605,8 @@ "" "@ div<VAXfp:fsfx>2 %2,%0 - div<VAXfp:fsfx>3 %2,%1,%0") + div<VAXfp:fsfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "div<mode>3" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g") @@ -528,19 +615,21 @@ "" "@ div<VAXint:isfx>2 %2,%0 - div<VAXint:isfx>3 %2,%1,%0") + div<VAXint:isfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) ;This is left out because it is very slow; ;we are better off programming around the "lack" of this insn. -;(define_insn "divmoddisi4" -; [(set (match_operand:SI 0 "general_operand" "=g") -; (div:SI (match_operand:DI 1 "general_operand" "g") -; (match_operand:SI 2 "general_operand" "g"))) -; (set (match_operand:SI 3 "general_operand" "=g") -; (mod:SI (match_operand:DI 1 "general_operand" "g") -; (match_operand:SI 2 "general_operand" "g")))] -; "" -; "ediv %2,%1,%0,%3") +;; FIXME: uncommented to see if it is ever used. +(define_insn "divmoddisi4" + [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "=g") + (div:SI (match_operand:DI 1 "general_operand" "nrmT") + (match_operand:SI 2 "general_operand" "nrmT"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=g") + (mod:SI (match_dup 1) (match_dup 2)))])] + "" + "ediv %2,%1,%0,%3" + [(set_attr "cc" "compare")]) ;; Bit-and on the VAX is done with a clear-bits insn. (define_expand "and<mode>3" @@ -573,7 +662,8 @@ "" "@ bic<VAXint:isfx>2 %1,%0 - bic<VAXint:isfx>3 %1,%2,%0") + bic<VAXint:isfx>3 %1,%2,%0" + [(set_attr "cc" "compare")]) ;; The following used to be needed because constant propagation can ;; create them starting from the bic insn patterns above. This is no @@ -587,7 +677,8 @@ "" "@ bic<VAXint:isfx>2 %<VAXint:iprefx>2,%0 - bic<VAXint:isfx>3 %<VAXint:iprefx>2,%1,%0") + bic<VAXint:isfx>3 %<VAXint:iprefx>2,%1,%0" + [(set_attr "cc" "compare")]) ;;- Bit set instructions. 
@@ -600,7 +691,8 @@ "@ bis<VAXint:isfx>2 %2,%0 bis<VAXint:isfx>2 %1,%0 - bis<VAXint:isfx>3 %2,%1,%0") + bis<VAXint:isfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) ;;- xor instructions. @@ -612,26 +704,30 @@ "@ xor<VAXint:isfx>2 %2,%0 xor<VAXint:isfx>2 %1,%0 - xor<VAXint:isfx>3 %2,%1,%0") + xor<VAXint:isfx>3 %2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "neg<mode>2" [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g") (neg:VAXfp (match_operand:VAXfp 1 "general_operand" "gF")))] "" - "mneg<VAXfp:fsfx> %1,%0") + "mneg<VAXfp:fsfx> %1,%0" + [(set_attr "cc" "compare")]) (define_insn "neg<mode>2" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g") (neg:VAXint (match_operand:VAXint 1 "general_operand" "nrmT")))] "" - "mneg<VAXint:isfx> %1,%0") + "mneg<VAXint:isfx> %1,%0" + [(set_attr "cc" "compare")]) (define_insn "one_cmpl<mode>2" [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g") (not:VAXint (match_operand:VAXint 1 "general_operand" "nrmT")))] "" - "mcom<VAXint:isfx> %1,%0") + "mcom<VAXint:isfx> %1,%0" + [(set_attr "cc" "compare")]) ;; Arithmetic right shift on the VAX works by negating the shift count, @@ -655,14 +751,16 @@ (ashiftrt:SI (match_operand:SI 1 "general_operand" "nrmT") (match_operand:QI 2 "const_int_operand" "n")))] "" - "ashl $%n2,%1,%0") + "ashl $%n2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (ashiftrt:SI (match_operand:SI 1 "general_operand" "nrmT") (neg:QI (match_operand:QI 2 "general_operand" "g"))))] "" - "ashl %2,%1,%0") + "ashl %2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "ashlsi3" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") @@ -692,7 +790,8 @@ } } return \"ashl %2,%1,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;; Arithmetic right shift on the VAX works by negating the shift count. 
(define_expand "ashrdi3" @@ -710,14 +809,16 @@ (ashift:DI (match_operand:DI 1 "general_operand" "g") (match_operand:QI 2 "general_operand" "g")))] "" - "ashq %2,%D1,%0") + "ashq %2,%D1,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:DI 0 "nonimmediate_operand" "=g") (ashiftrt:DI (match_operand:DI 1 "general_operand" "g") (neg:QI (match_operand:QI 2 "general_operand" "g"))))] "" - "ashq %2,%D1,%0") + "ashq %2,%D1,%0" + [(set_attr "cc" "compare")]) ;; We used to have expand_shift handle logical right shifts by using extzv, ;; but this make it very difficult to do lshrdi3. Since the VAX is the @@ -742,7 +843,7 @@ ;; Rotate right on the VAX works by negating the shift count. (define_expand "rotrsi3" [(set (match_operand:SI 0 "general_operand" "=g") - (rotatert:SI (match_operand:SI 1 "general_operand" "g") + (rotatert:SI (match_operand:SI 1 "general_operand" "nrmT") (match_operand:QI 2 "general_operand" "g")))] "" " @@ -756,21 +857,24 @@ (rotate:SI (match_operand:SI 1 "general_operand" "nrmT") (match_operand:QI 2 "general_operand" "g")))] "" - "rotl %2,%1,%0") + "rotl %2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (rotatert:SI (match_operand:SI 1 "general_operand" "nrmT") (match_operand:QI 2 "const_int_operand" "n")))] "" - "rotl %R2,%1,%0") + "rotl %R2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (rotatert:SI (match_operand:SI 1 "general_operand" "nrmT") (neg:QI (match_operand:QI 2 "general_operand" "g"))))] "" - "rotl %2,%1,%0") + "rotl %2,%1,%0" + [(set_attr "cc" "compare")]) ;This insn is probably slower than a multiply and an add. 
;(define_insn "" @@ -779,45 +883,64 @@ ; (match_operand:SI 2 "general_operand" "g")) ; (match_operand:SI 3 "general_operand" "g")))] ; "" -; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0") +; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0" +; [(set_attr "cc" "compare")]) ;; Special cases of bit-field insns which we should ;; recognize in preference to the general case. ;; These handle aligned 8-bit and 16-bit fields, ;; which can usually be done with move instructions. -;; netbsd changed this to REG_P (operands[0]) || (MEM_P (operands[0]) && ... -;; but gcc made it just !MEM_P (operands[0]) || ... +;; Split into two insns. This variant works for register destinations +;; where operand 2 is a multiple of operand 1. The insv instruction +;; is somewhat unusual for VAX in not modifying any of the flags. +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+ro") + (match_operand:QI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (match_operand:SI 3 "general_operand" "g"))] + "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16) + && (INTVAL (operands[2]) != 0) && REG_P (operands[0]) + && (INTVAL (operands[2]) % INTVAL (operands[1]) == 0)" + "insv %3,%2,%1,%0" + [(set_attr "cc" "none")]) +;; This case handles a destination in memory. +;; TODO: investigate mode_dependent_address_p () and its meaning. (define_insn "" [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+ro") (match_operand:QI 1 "const_int_operand" "n") (match_operand:SI 2 "const_int_operand" "n")) (match_operand:SI 3 "general_operand" "g"))] "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16) - && INTVAL (operands[2]) % INTVAL (operands[1]) == 0 - && (REG_P (operands[0]) - || (MEM_P (operands[0]) - && ! mode_dependent_address_p (XEXP (operands[0], 0), - MEM_ADDR_SPACE (operands[0]))))" + && (INTVAL (operands[2]) % INTVAL (operands[1]) == 0) + && MEM_P (operands[0]) + && ! 
mode_dependent_address_p ( XEXP (operands[0], 0), + MEM_ADDR_SPACE (operands[0]))" "* { - if (REG_P (operands[0])) - { - if (INTVAL (operands[2]) != 0) - return \"insv %3,%2,%1,%0\"; - } - else - operands[0] - = adjust_address (operands[0], + operands[0] = adjust_address (operands[0], INTVAL (operands[1]) == 8 ? QImode : HImode, INTVAL (operands[2]) / 8); - CC_STATUS_INIT; if (INTVAL (operands[1]) == 8) return \"movb %3,%0\"; return \"movw %3,%0\"; -}") +}" + [(set_attr "cc" "compare")]) + +;; The "extzv" variant sets N and Z, but movz* sets Z only. +;; Split them into two definitions for different cc attributes. + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=&g") + (zero_extract:SI (match_operand:SI 1 "register_operand" "ro") + (match_operand:QI 2 "const_int_operand" "n") + (match_operand:SI 3 "const_int_operand" "n")))] + "REG_P (operands[1]) && INTVAL (operands[3]) != 0 + && (INTVAL (operands[3]) + INTVAL (operands[2]) < 32)" + "extzv %3,%2,%1,%0" + [(set_attr "cc" "compare")]) (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=&g") @@ -826,19 +949,12 @@ (match_operand:SI 3 "const_int_operand" "n")))] "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16) && INTVAL (operands[3]) % INTVAL (operands[2]) == 0 - && (REG_P (operands[1]) - || (MEM_P (operands[1]) - && ! mode_dependent_address_p (XEXP (operands[1], 0), - MEM_ADDR_SPACE (operands[1]))))" + && MEM_P (operands[1]) + && ! mode_dependent_address_p (XEXP (operands[1], 0), + MEM_ADDR_SPACE (operands[1]))" "* { - if (REG_P (operands[1])) - { - if (INTVAL (operands[3]) != 0) - return \"extzv %3,%2,%1,%0\"; - } - else - operands[1] + operands[1] = adjust_address (operands[1], INTVAL (operands[2]) == 8 ? 
QImode : HImode, INTVAL (operands[3]) / 8); @@ -846,7 +962,8 @@ if (INTVAL (operands[2]) == 8) return \"movzbl %1,%0\"; return \"movzwl %1,%0\"; -}") +}" + [(set_attr "cc" "clobber")]) (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") @@ -875,7 +992,8 @@ if (INTVAL (operands[2]) == 8) return \"cvtbl %1,%0\"; return \"cvtwl %1,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;; Register-only SImode cases of bit-field insns. @@ -887,7 +1005,8 @@ (match_operand:SI 2 "general_operand" "nrmT")) (match_operand:SI 3 "general_operand" "nrmT")))] "" - "cmpv %2,%1,%0,%3") + "cmpv %2,%1,%0,%3" + [(set_attr "cc" "clobber")]) (define_insn "" [(set (cc0) @@ -897,7 +1016,8 @@ (match_operand:SI 2 "general_operand" "nrmT")) (match_operand:SI 3 "general_operand" "nrmT")))] "" - "cmpzv %2,%1,%0,%3") + "cmpzv %2,%1,%0,%3" + [(set_attr "cc" "clobber")]) ;; When the field position and size are constant and the destination ;; is a register, extv and extzv are much slower than a rotate followed @@ -919,29 +1039,47 @@ if (INTVAL (operands[2]) == 8) return \"rotl %R3,%1,%0\;cvtbl %0,%0\"; return \"rotl %R3,%1,%0\;cvtwl %0,%0\"; -}") +}" + [(set_attr "cc" "compare")]) +;; Split into two insns to mark the movzbl/movzwl variants as clobber. (define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (zero_extract:SI (match_operand:SI 1 "register_operand" "ro") (match_operand:QI 2 "general_operand" "g") (match_operand:SI 3 "general_operand" "nrmT")))] - "" + "! CONST_INT_P (operands[3]) || ! CONST_INT_P (operands[2]) + || ! REG_P (operands[0]) + || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16)" "* { if (! CONST_INT_P (operands[3]) || ! CONST_INT_P (operands[2]) || ! 
REG_P (operands[0])) return \"extzv %3,%2,%1,%0\"; - if (INTVAL (operands[2]) == 8) - return \"rotl %R3,%1,%0\;movzbl %0,%0\"; - if (INTVAL (operands[2]) == 16) - return \"rotl %R3,%1,%0\;movzwl %0,%0\"; if (INTVAL (operands[3]) & 31) return \"rotl %R3,%1,%0\;bicl2 %M2,%0\"; if (rtx_equal_p (operands[0], operands[1])) return \"bicl2 %M2,%0\"; return \"bicl3 %M2,%1,%0\"; -}") +}" + [(set_attr "cc" "compare")]) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=g") + (zero_extract:SI (match_operand:SI 1 "register_operand" "ro") + (match_operand:QI 2 "general_operand" "g") + (match_operand:SI 3 "general_operand" "nrmT")))] + "INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16" + "* +{ + if (INTVAL (operands[2]) == 8) + return \"rotl %R3,%1,%0\;movzbl %0,%0\"; + else if (INTVAL (operands[2]) == 16) + return \"rotl %R3,%1,%0\;movzwl %0,%0\"; + else + gcc_unreachable (); +}" + [(set_attr "cc" "clobber")]) ;; Non-register cases. ;; nonimmediate_operand is used to make sure that mode-ambiguous cases @@ -955,7 +1093,8 @@ (match_operand:SI 2 "general_operand" "nrmT")) (match_operand:SI 3 "general_operand" "nrmT")))] "" - "cmpv %2,%1,%0,%3") + "cmpv %2,%1,%0,%3" + [(set_attr "cc" "clobber")]) (define_insn "" [(set (cc0) @@ -965,7 +1104,8 @@ (match_operand:SI 2 "general_operand" "nrmT")) (match_operand:SI 3 "general_operand" "nrmT")))] "" - "cmpzv %2,%1,%0,%3") + "cmpzv %2,%1,%0,%3" + [(set_attr "cc" "clobber")]) (define_insn "extv" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") @@ -987,7 +1127,8 @@ if (INTVAL (operands[2]) == 8) return \"rotl %R3,%1,%0\;cvtbl %0,%0\"; return \"rotl %R3,%1,%0\;cvtwl %0,%0\"; -}") +}" + [(set_attr "cc" "compare")]) (define_expand "extzv" [(set (match_operand:SI 0 "general_operand" "") @@ -997,26 +1138,49 @@ "" "") +;; This was split into three insns so the CC attribute can be set correctly. 
(define_insn "" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (zero_extract:SI (match_operand:QI 1 "memory_operand" "m") (match_operand:QI 2 "general_operand" "g") (match_operand:SI 3 "general_operand" "nrmT")))] - "" + "! REG_P (operands[0]) || ! CONST_INT_P (operands[2]) + || ! CONST_INT_P (operands[3]) + || INTVAL (operands[2]) + INTVAL (operands[3]) > 32 + || side_effects_p (operands[1]) + || (MEM_P (operands[1]) + && mode_dependent_address_p (XEXP (operands[1], 0), + MEM_ADDR_SPACE (operands[1])))" + "extzv %3,%2,%1,%0" + [(set_attr "cc" "compare")]) + +;; These insns end in movzbl/movzwl, so CC can't be used for comparison. +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=g") + (zero_extract:SI (match_operand:QI 1 "memory_operand" "m") + (match_operand:QI 2 "general_operand" "g") + (match_operand:SI 3 "general_operand" "nrmT")))] + "INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16" "* { - if (! REG_P (operands[0]) || ! CONST_INT_P (operands[2]) - || ! CONST_INT_P (operands[3]) - || INTVAL (operands[2]) + INTVAL (operands[3]) > 32 - || side_effects_p (operands[1]) - || (MEM_P (operands[1]) - && mode_dependent_address_p (XEXP (operands[1], 0), - MEM_ADDR_SPACE (operands[1])))) - return \"extzv %3,%2,%1,%0\"; if (INTVAL (operands[2]) == 8) return \"rotl %R3,%1,%0\;movzbl %0,%0\"; - if (INTVAL (operands[2]) == 16) + else if (INTVAL (operands[2]) == 16) return \"rotl %R3,%1,%0\;movzwl %0,%0\"; + else + gcc_unreachable (); +}" + [(set_attr "cc" "clobber")]) + +;; The final insn in these sequences should set Z and N correctly. 
+(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=g") + (zero_extract:SI (match_operand:QI 1 "memory_operand" "m") + (match_operand:QI 2 "general_operand" "g") + (match_operand:SI 3 "general_operand" "nrmT")))] + "" + "* +{ if (MEM_P (operands[1]) && GET_CODE (XEXP (operands[1], 0)) == PLUS && REG_P (XEXP (XEXP (operands[1], 0), 0)) @@ -1040,16 +1204,21 @@ return \"extzv %3,%2,%1,%0\"; } return \"rotl %R3,%1,%0\;bicl2 %M2,%0\"; -}") +}" + [(set_attr "cc" "compare")]) -(define_expand "insv" +;; Changed name from "insv" to "insvsi" to match standard name for type. +(define_expand "insvsi" [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "") (match_operand:QI 1 "general_operand" "") (match_operand:SI 2 "general_operand" "")) - (match_operand:SI 3 "general_operand" ""))] - "" - "") + (match_operand:SI 3 "general_operand" ""))]) +;; The final insv doesn't touch any flags, but the added instructions to +;; prepare the operands for it probably will. Also, the condition codes +;; are unpredictable if a reserved operand fault occurs due to size or +;; pos being out of the range of the 32-bit destination field. +;; It's best to tag the entire sequence as clobbering cc. (define_insn "" [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+g") (match_operand:QI 1 "general_operand" "g") @@ -1079,22 +1248,29 @@ } } return \"insv %3,%2,%1,%0\"; -}") +}" + [(set_attr "cc" "clobber")]) +;; FIXME: this could raise a reserved operand fault if %1 or %3 are +;; out of the acceptable ranges for a 32-bit field. Under normal +;; circumstances, the condition codes should all be unmodified, which +;; is unusual for a VAX instruction that moves data. 
(define_insn "" [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r") (match_operand:QI 1 "general_operand" "g") (match_operand:SI 2 "general_operand" "nrmT")) (match_operand:SI 3 "general_operand" "nrmT"))] "" - "insv %3,%2,%1,%0") + "insv %3,%2,%1,%0" + [(set_attr "cc" "none")]) ;; Unconditional jump (define_insn "jump" [(set (pc) (label_ref (match_operand 0 "" "")))] "" - "jbr %l0") + "jbr %l0" + [(set_attr "cc" "clobber")]) ;; Conditional jumps @@ -1130,7 +1306,8 @@ (label_ref (match_operand 1 "" "")) (pc)))] "" - "j%c0 %l1") + "j%c0 %l1" + [(set_attr "cc" "none")]) ;; Recognize reversed jumps. (define_insn "*branch_reversed" @@ -1141,7 +1318,8 @@ (pc) (label_ref (match_operand 1 "" ""))))] "" - "j%C0 %l1") ; %C0 negates condition + "j%C0 %l1" ; %C0 negates condition + [(set_attr "cc" "none")]) ;; Recognize jbs, jlbs, jbc and jlbc instructions. Note that the operand ;; of jlbs and jlbc insns are SImode in the hardware. However, if it is @@ -1160,7 +1338,8 @@ "" "@ jlbs %0,%l2 - jbs %1,%0,%l2") + jbs %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "" [(set (pc) @@ -1174,7 +1353,8 @@ "" "@ jlbc %0,%l2 - jbc %1,%0,%l2") + jbc %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "" [(set (pc) @@ -1188,7 +1368,8 @@ "" "@ jlbs %0,%l2 - jbs %1,%0,%l2") + jbs %1,%0,%l2" + [(set_attr "cc" "none")]) (define_insn "" [(set (pc) @@ -1202,7 +1383,8 @@ "" "@ jlbc %0,%l2 - jbc %1,%0,%l2") + jbc %1,%0,%l2" + [(set_attr "cc" "none")]) ;; Subtract-and-jump and Add-and-jump insns. 
;; These are not used when output is for the Unix assembler @@ -1216,13 +1398,14 @@ (gt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g") (const_int -1)) (const_int 0)) - (label_ref (match_operand 1 "" "")) + (label_ref (match_operand 1)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))] "!TARGET_UNIX_ASM" - "jsobgtr %0,%l1") + "jsobgtr %0,%l1" + [(set_attr "cc" "compare")]) (define_insn "" [(set (pc) @@ -1230,13 +1413,14 @@ (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g") (const_int -1)) (const_int 0)) - (label_ref (match_operand 1 "" "")) + (label_ref (match_operand 1)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))] "!TARGET_UNIX_ASM" - "jsobgeq %0,%l1") + "jsobgeq %0,%l1" + [(set_attr "cc" "compare")]) ;; Normal aob insns. Define a version for when operands[1] is a constant. (define_insn "" @@ -1245,26 +1429,28 @@ (lt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g") (const_int 1)) (match_operand:SI 1 "general_operand" "nrmT")) - (label_ref (match_operand 2 "" "")) + (label_ref (match_operand 2)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))] "!TARGET_UNIX_ASM" - "jaoblss %1,%0,%l2") + "jaoblss %1,%0,%l2" + [(set_attr "cc" "compare")]) (define_insn "" [(set (pc) (if_then_else (lt (match_operand:SI 0 "nonimmediate_operand" "+g") (match_operand:SI 1 "general_operand" "nrmT")) - (label_ref (match_operand 2 "" "")) + (label_ref (match_operand 2)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))] "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])" - "jaoblss %P1,%0,%l2") + "jaoblss %P1,%0,%l2" + [(set_attr "cc" "compare")]) (define_insn "" [(set (pc) @@ -1272,26 +1458,28 @@ (le (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g") (const_int 1)) (match_operand:SI 1 "general_operand" "nrmT")) - (label_ref (match_operand 2 "" "")) + (label_ref (match_operand 2)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))] "!TARGET_UNIX_ASM" - "jaobleq %1,%0,%l2") + 
"jaobleq %1,%0,%l2" + [(set_attr "cc" "compare")]) (define_insn "" [(set (pc) (if_then_else (le (match_operand:SI 0 "nonimmediate_operand" "+g") (match_operand:SI 1 "general_operand" "nrmT")) - (label_ref (match_operand 2 "" "")) + (label_ref (match_operand 2)) (pc))) (set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))] "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])" - "jaobleq %P1,%0,%l2") + "jaobleq %P1,%0,%l2" + [(set_attr "cc" "compare")]) ;; Something like a sob insn, but compares against -1. ;; This finds `while (foo--)' which was changed to `while (--foo != -1)'. @@ -1307,8 +1495,14 @@ (plus:SI (match_dup 0) (const_int -1)))] "" - "decl %0\;jgequ %l1") + "decl %0\;jgequ %l1" + [(set_attr "cc" "compare")]) +;; Note that operand 1 is total size of args, in bytes, +;; and what the call insn wants is the number of words. +;; It is used in the call instruction as a byte, but in the addl2 as +;; a word. Since the only time we actually use it in the call instruction +;; is when it is a constant, SImode (for addl2) is the proper mode. (define_expand "call_pop" [(parallel [(call (match_operand:QI 0 "memory_operand" "") (match_operand:SI 1 "const_int_operand" "")) @@ -1317,12 +1511,7 @@ (match_operand:SI 3 "immediate_operand" "")))])] "" { - gcc_assert (INTVAL (operands[3]) <= 255 * 4 && INTVAL (operands[3]) % 4 == 0); - - /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size - during EH unwinding. We must include the argument count pushed by - the calls instruction. 
*/ - operands[1] = GEN_INT (INTVAL (operands[3]) + 4); + gcc_assert (INTVAL (operands[1]) <= 255 * 4); }) (define_insn "*call_pop" @@ -1332,9 +1521,11 @@ (match_operand:SI 2 "immediate_operand" "i")))] "" { - operands[1] = GEN_INT ((INTVAL (operands[1]) - 4) / 4); + operands[1] = GEN_INT ((INTVAL (operands[1]) + 3) / 4); return "calls %1,%0"; -}) +} + [(set_attr "cc" "none")]) + (define_expand "call_value_pop" [(parallel [(set (match_operand 0 "" "") @@ -1345,12 +1536,7 @@ (match_operand:SI 4 "immediate_operand" "")))])] "" { - gcc_assert (INTVAL (operands[4]) <= 255 * 4 && INTVAL (operands[4]) % 4 == 0); - - /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size - during EH unwinding. We must include the argument count pushed by - the calls instruction. */ - operands[2] = GEN_INT (INTVAL (operands[4]) + 4); + gcc_assert (INTVAL (operands[2]) <= 255 * 4); }) (define_insn "*call_value_pop" @@ -1360,49 +1546,29 @@ (set (reg:SI VAX_SP_REGNUM) (plus:SI (reg:SI VAX_SP_REGNUM) (match_operand:SI 3 "immediate_operand" "i")))] "" - "* { - operands[2] = GEN_INT ((INTVAL (operands[2]) - 4) / 4); - return \"calls %2,%1\"; -}") + operands[2] = GEN_INT ((INTVAL (operands[2]) + 3) / 4); + return "calls %2,%1"; +} + [(set_attr "cc" "none")]) -(define_expand "call" - [(call (match_operand:QI 0 "memory_operand" "") - (match_operand:SI 1 "const_int_operand" ""))] - "" - " -{ - /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size - during EH unwinding. We must include the argument count pushed by - the calls instruction. 
*/ - operands[1] = GEN_INT (INTVAL (operands[1]) + 4); -}") - -(define_insn "*call" - [(call (match_operand:QI 0 "memory_operand" "m") - (match_operand:SI 1 "const_int_operand" ""))] - "" - "calls $0,%0") -(define_expand "call_value" - [(set (match_operand 0 "" "") - (call (match_operand:QI 1 "memory_operand" "") - (match_operand:SI 2 "const_int_operand" "")))] +;; Define another set of these for the case of functions with no operands. +;; These will allow the optimizers to do a slightly better job. +(define_insn "call" + [(call (match_operand:QI 0 "memory_operand" "m") + (const_int 0))] "" - " -{ - /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size - during EH unwinding. We must include the argument count pushed by - the calls instruction. */ - operands[2] = GEN_INT (INTVAL (operands[2]) + 4); -}") + "calls $0,%0" + [(set_attr "cc" "none")]) -(define_insn "*call_value" +(define_insn "call_value" [(set (match_operand 0 "" "") (call (match_operand:QI 1 "memory_operand" "m") - (match_operand:SI 2 "const_int_operand" "")))] + (const_int 0)))] "" - "calls $0,%1") + "calls $0,%1" + [(set_attr "cc" "none")]) ;; Call subroutine returning any type. @@ -1439,7 +1605,8 @@ (define_insn "blockage" [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)] "" - "") + "" + [(set_attr "cc" "clobber")]) (define_insn "procedure_entry_mask" [(unspec_volatile [(match_operand 0 "const_int_operand")] VUNSPEC_PEM)] @@ -1449,7 +1616,8 @@ (define_insn "return" [(return)] "" - "ret") + "ret" + [(set_attr "cc" "clobber")]) (define_expand "prologue" [(const_int 0)] @@ -1471,7 +1639,8 @@ (define_insn "nop" [(const_int 0)] "" - "nop") + "nop" + [(set_attr "cc" "none")]) ;; This had a wider constraint once, and it had trouble. 
;; If you are tempted to try `g', please don't--it's not worth @@ -1479,7 +1648,8 @@ (define_insn "indirect_jump" [(set (pc) (match_operand:SI 0 "register_operand" "r"))] "" - "jmp (%0)") + "jmp (%0)" + [(set_attr "cc" "clobber")]) ;; This is here to accept 5 arguments (as passed by expand_end_case) ;; and pass the first 4 along to the casesi1 pattern that really does @@ -1542,31 +1712,43 @@ (pc)))) (label_ref:SI (match_operand 2 "" ""))))] "" - "casel %0,$0,%1") + "casel %0,$0,%1" + [(set_attr "cc" "clobber")]) +;; Note that the flags will be set based on the value pushed to stack %0, +;; which will be the address of operand 1. V is cleared, C is unmodified. +;; I believe these pattern names are used to patch up linker references to +;; external symbols in shared libraries. If the assembler or linker ever +;; converts such a reference to more than the single "pushab" operand, +;; then cc should be changed to "clobber". If it's only used to patch up +;; a numeric reference, and the optimizer isn't confused, VZN should work. 
(define_insn "pushextsym" [(set (match_operand:SI 0 "push_operand" "=g") (match_operand:SI 1 "external_symbolic_operand" "i"))] "" - "pushab %a1") + "pushab %a1" + [(set_attr "cc" "compare")]) (define_insn "movextsym" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (match_operand:SI 1 "external_symbolic_operand" "i"))] "" - "movab %a1,%0") + "movab %a1,%0" + [(set_attr "cc" "compare")]) (define_insn "pushlclsym" [(set (match_operand:SI 0 "push_operand" "=g") (match_operand:SI 1 "local_symbolic_operand" "i"))] "" - "pushab %a1") + "pushab %a1" + [(set_attr "cc" "compare")]) (define_insn "movlclsym" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (match_operand:SI 1 "local_symbolic_operand" "i"))] "" - "movab %a1,%0") + "movab %a1,%0" + [(set_attr "cc" "compare")]) ;;- load or push effective address ;; These come after the move and add/sub patterns @@ -1580,25 +1762,29 @@ [(set (match_operand:SI 0 "push_operand" "=g") (match_operand:VAXintQHSD 1 "address_operand" "p"))] "" - "pusha<VAXintQHSD:isfx> %a1") + "pusha<VAXintQHSD:isfx> %a1" + [(set_attr "cc" "compare")]) (define_insn "movaddr<mode>" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (match_operand:VAXintQHSD 1 "address_operand" "p"))] "" - "mova<VAXintQHSD:isfx> %a1,%0") + "mova<VAXintQHSD:isfx> %a1,%0" + [(set_attr "cc" "compare")]) (define_insn "pushaddr<mode>" [(set (match_operand:SI 0 "push_operand" "=g") (match_operand:VAXfp 1 "address_operand" "p"))] "" - "pusha<VAXfp:fsfx> %a1") + "pusha<VAXfp:fsfx> %a1" + [(set_attr "cc" "compare")]) (define_insn "movaddr<mode>" [(set (match_operand:SI 0 "nonimmediate_operand" "=g") (match_operand:VAXfp 1 "address_operand" "p"))] "" - "mova<VAXfp:fsfx> %a1,%0") + "mova<VAXfp:fsfx> %a1,%0" + [(set_attr "cc" "compare")]) ;; These used to be peepholes, but it is more straightforward to do them ;; as single insns. 
However, we must force the output to be a register @@ -1628,7 +1814,8 @@ operands[3] = GEN_INT (mask1 & mask2); return \"rotl %R2,%1,%0\;bicl2 %N3,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;; left-shift and mask ;; The only case where `ashl' is better is if the mask only turns off @@ -1646,13 +1833,15 @@ operands[3] = GEN_INT (INTVAL (operands[3]) & ~((1 << INTVAL (operands[2])) - 1)); return \"rotl %2,%1,%0\;bicl2 %N3,%0\"; -}") +}" + [(set_attr "cc" "compare")]) ;; Instruction sequence to sync the VAX instruction stream. (define_insn "sync_istream" [(unspec_volatile [(const_int 0)] VUNSPEC_SYNC_ISTREAM)] "" - "movpsl -(%|sp)\;pushal 1(%|pc)\;rei") + "movpsl -(%|sp)\;pushal 1(%|pc)\;rei" + [(set_attr "cc" "clobber")]) (define_expand "nonlocal_goto" [(use (match_operand 0 "general_operand" "")) @@ -1682,47 +1871,6 @@ (include "builtins.md") -(define_peephole2 - [(set (match_operand:SI 0 "push_operand" "") - (const_int 0)) - (set (match_dup 0) - (match_operand:SI 1 "const_int_operand" ""))] - "INTVAL (operands[1]) >= 0" - [(set (match_dup 0) - (match_dup 1))] - "operands[0] = gen_rtx_MEM(DImode, XEXP (operands[0], 0));") - -(define_peephole2 - [(set (match_operand:SI 0 "push_operand" "") - (match_operand:SI 1 "general_operand" "")) - (set (match_dup 0) - (match_operand:SI 2 "general_operand" ""))] - "vax_decomposed_dimode_operand_p (operands[2], operands[1])" - [(set (match_dup 0) - (match_dup 2))] - "{ - operands[0] = gen_rtx_MEM(DImode, XEXP (operands[0], 0)); - operands[2] = REG_P (operands[2]) - ? gen_rtx_REG(DImode, REGNO (operands[2])) - : gen_rtx_MEM(DImode, XEXP (operands[2], 0)); -}") - -; Leave this commented out until we can determine whether the second move -; precedes a jump which relies on the CC flags being set correctly. 
-(define_peephole2 - [(set (match_operand:SI 0 "nonimmediate_operand" "") - (match_operand:SI 1 "general_operand" "")) - (set (match_operand:SI 2 "nonimmediate_operand" "") - (match_operand:SI 3 "general_operand" ""))] - "0 && vax_decomposed_dimode_operand_p (operands[1], operands[3]) - && vax_decomposed_dimode_operand_p (operands[0], operands[2])" - [(set (match_dup 0) - (match_dup 1))] - "{ - operands[0] = REG_P (operands[0]) - ? gen_rtx_REG(DImode, REGNO (operands[0])) - : gen_rtx_MEM(DImode, XEXP (operands[0], 0)); - operands[1] = REG_P (operands[1]) - ? gen_rtx_REG(DImode, REGNO (operands[1])) - : gen_rtx_MEM(DImode, XEXP (operands[1], 0)); -}") +;; The earlier peephole definitions were removed because they did not update +;; the condition codes equivalently to the 32-bit instructions they replaced. +;; TODO: reinstate them once condition-code (cc0) tracking has been fixed.