Revision: 20276
Author: [email protected]
Date: Wed Mar 26 12:15:35 2014 UTC
Log: Introduce andp, notp, orp and xorp for x64 port
[email protected]
Review URL: https://codereview.chromium.org/205343013
http://code.google.com/p/v8/source/detail?r=20276
Modified:
/branches/bleeding_edge/src/x64/assembler-x64.cc
/branches/bleeding_edge/src/x64/assembler-x64.h
/branches/bleeding_edge/src/x64/builtins-x64.cc
/branches/bleeding_edge/src/x64/code-stubs-x64.cc
/branches/bleeding_edge/src/x64/codegen-x64.cc
/branches/bleeding_edge/src/x64/full-codegen-x64.cc
/branches/bleeding_edge/src/x64/ic-x64.cc
/branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
/branches/bleeding_edge/src/x64/macro-assembler-x64.cc
/branches/bleeding_edge/src/x64/macro-assembler-x64.h
/branches/bleeding_edge/src/x64/regexp-macro-assembler-x64.cc
/branches/bleeding_edge/src/x64/stub-cache-x64.cc
/branches/bleeding_edge/test/cctest/test-assembler-x64.cc
/branches/bleeding_edge/test/cctest/test-disasm-x64.cc
/branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc
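The trailing 'p' denotes an operation on pointer-sized operands: on stock x64 it emits the quadword form, and a 32-bit-pointer target can map it to the doubleword form instead. The new mnemonics are generated by the ASSEMBLER_INSTRUCTION_LIST / DECLARE_INSTRUCTION macros in assembler-x64.h; the diff below only shows the first lines of DECLARE_INSTRUCTION, so the following is a sketch of the assumed expansion (kPointerSize, kInt32Size and kInt64Size are V8's operand-size constants):

  // Sketch only: the full DECLARE_INSTRUCTION body is not part of this diff.
  // For each list entry V(and), V(or), ... it declares three spellings, all
  // forwarding to a single size-parameterized emitter such as emit_and():
  #define DECLARE_INSTRUCTION(instruction)        \
    template<class P1>                            \
    void instruction##p(P1 p1) {                  \
      emit_##instruction(p1, kPointerSize);       \
    }                                             \
                                                  \
    template<class P1>                            \
    void instruction##l(P1 p1) {                  \
      emit_##instruction(p1, kInt32Size);         \
    }                                             \
                                                  \
    template<class P1>                            \
    void instruction##q(P1 p1) {                  \
      emit_##instruction(p1, kInt64Size);         \
    }
  // (two-operand variants follow the same pattern with P1, P2)

  ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
  #undef DECLARE_INSTRUCTION

This is also why the trailing-underscore names (and_, or_, not_, xor_) can go away: 'and', 'or', 'not' and 'xor' are C++ alternative operator tokens, but in the list they only ever occur pasted to a suffix (andp, emit_and, ...), so no bare keyword survives the expansion.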
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/assembler-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -1551,28 +1551,20 @@
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-
-
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
void Assembler::Nop(int n) {
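
The rewritten emit_not selects its REX prefix from the size argument. emit_rex itself is outside this diff; a plausible sketch, assuming it dispatches the way these call sites suggest:

  // Assumed helper body (only its call sites appear in this diff): pick the
  // REX prefix by operand size. kInt64Size forces REX.W; for kInt32Size a
  // REX byte is emitted only when extended registers (r8-r15) require one.
  template<class P1>
  void Assembler::emit_rex(P1 p1, int size) {
    if (size == kInt64Size) {
      emit_rex_64(p1);
    } else {
      ASSERT(size == kInt32Size);
      emit_optional_rex_32(p1);
    }
  }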
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/assembler-x64.h Wed Mar 26 12:15:35 2014 UTC
@@ -511,6 +511,7 @@
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
+ V(and) \
V(cmp) \
V(dec) \
V(idiv) \
@@ -521,11 +522,14 @@
V(movzxb) \
V(movzxw) \
V(neg) \
+ V(not) \
+ V(or) \
V(repmovs) \
V(sbb) \
V(sub) \
V(test) \
- V(xchg)
+ V(xchg) \
+ V(xor)
class Assembler : public AssemblerBase {
@@ -674,9 +678,7 @@
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
template<class P1> \
@@ -838,38 +840,6 @@
void cmpw(const Operand& dst, Register src) {
arithmetic_op_16(0x39, src, dst);
}
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
@@ -886,50 +856,6 @@
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
@@ -1030,50 +956,6 @@
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
@@ -1634,6 +1516,51 @@
immediate_arithmetic_op_32(0x0, dst, src);
}
}
+
+ void emit_and(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x21, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x21, src, dst);
+ }
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
void emit_cmp(Register dst, Register src, int size) {
if (size == kInt64Size) {
@@ -1712,6 +1639,49 @@
void emit_neg(Register dst, int size);
void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x9, src, dst);
+ } else {
+ arithmetic_op_32(0x9, src, dst);
+ }
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
void emit_repmovs(int size);
@@ -1777,6 +1747,55 @@
// Exchange two registers
void emit_xchg(Register dst, Register src, int size);
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x33, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x31, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x31, src, dst);
+ }
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
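
Note the dst == src special case kept in emit_xor: a register xored with itself is always emitted in the 32-bit encoding, even when the quadword form was requested. A write to a 32-bit register zero-extends into the full 64-bit register on x86-64, so the result is identical and the REX.W prefix byte is saved:

  // Both forms clear all 64 bits of rax; the 32-bit one is a byte shorter:
  //   48 33 c0   xor rax, rax   (REX.W + 0x33 /r, 3 bytes)
  //   33 c0      xor eax, eax   (0x33 /r, 2 bytes, upper half zeroed)
  __ xorp(rax, rax);  // dst == src, so this emits the 2-byte form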
=======================================
--- /branches/bleeding_edge/src/x64/builtins-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/builtins-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -278,7 +278,7 @@
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -342,7 +342,7 @@
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -1040,7 +1040,7 @@
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
+ __ xorp(r8, r8);
__ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -1839,7 +1839,7 @@
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
__ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movp(rax, rdx);
__ ret(0);
@@ -3119,7 +3119,7 @@
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
+ __ andp(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
@@ -3848,7 +3848,7 @@
__ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
__ movp(rax, rdx);
}
@@ -3957,7 +3957,7 @@
__ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
@@ -4047,7 +4047,7 @@
__ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
@@ -4069,7 +4069,7 @@
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4193,7 +4193,7 @@
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
@@ -4264,7 +4264,7 @@
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
@@ -4325,7 +4325,7 @@
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
@@ -4504,7 +4504,7 @@
Label need_incremental_pop_object;
__ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
+ __ andp(regs_.scratch0(), regs_.object());
__ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
@@ -4942,7 +4942,7 @@
__ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5016,7 +5016,7 @@
// but the following masking takes care of that anyway.
__ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ andp(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
=======================================
--- /branches/bleeding_edge/src/x64/codegen-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/codegen-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -607,12 +607,12 @@
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ leaq(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
+ __ andq(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
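
These two sites become explicit 'q' instructions rather than 'p': the code assembles the 64-bit bit pattern of a double (extracting the exponent bits for the exp table lookup), so the operations must stay quadword even on a target whose pointers, and hence whose 'p' instructions, are only 32 bits wide. Presumably that distinction is the point of keeping separate 'p' and 'q' spellings.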
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -1013,7 +1013,7 @@
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpp(rdx, rax);
@@ -2311,7 +2311,7 @@
Label done, stub_call, smi_case;
__ Pop(rdx);
__ movp(rcx, rax);
- __ or_(rax, rdx);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
@@ -3056,7 +3056,7 @@
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
@@ -4658,7 +4658,7 @@
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
=======================================
--- /branches/bleeding_edge/src/x64/ic-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/ic-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -424,9 +424,9 @@
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask &
KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -1403,7 +1403,7 @@
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1411,7 +1411,7 @@
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
@@ -1451,13 +1451,13 @@
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ __ andp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ __ orp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ __ xorp(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1467,13 +1467,13 @@
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ __ andp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ __ orp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ __ xorp(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -276,10 +276,10 @@
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpp(scratch, kScratchRegister);
@@ -295,7 +295,7 @@
} else {
leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
@@ -576,7 +576,7 @@
// the slow case, converting the key to a smi is always valid.
// key: string key
// hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
+ andp(hash, Immediate(String::kArrayIndexValueMask));
shr(hash, Immediate(String::kHashShift));
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
@@ -1002,7 +1002,7 @@
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ xorq(dst, kScratchRegister);
} else {
Move(dst, src);
}
@@ -1014,7 +1014,7 @@
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ xorq(Operand(rsp, 0), kScratchRegister);
} else {
Push(src);
}
@@ -1255,12 +1255,12 @@
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
movp(dst, kScratchRegister);
} else {
movp(dst, src1);
- or_(dst, src2);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1307,7 +1307,7 @@
return CheckNonNegativeSmi(first);
}
movp(kScratchRegister, first);
- or_(kScratchRegister, second);
+ orp(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
@@ -1799,7 +1799,7 @@
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
- xor_(dst, src2);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
@@ -1823,7 +1823,7 @@
// One of src1 and src2 is zero, the check whether the other is
// negative.
movp(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1955,11 +1955,11 @@
// Set tag and padding bits before negating, so that they are zero afterwards.
movl(kScratchRegister, Immediate(~0));
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
@@ -1968,7 +1968,7 @@
if (!dst.is(src1)) {
movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1978,10 +1978,10 @@
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1991,7 +1991,7 @@
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -1999,10 +1999,10 @@
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2012,7 +2012,7 @@
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2020,10 +2020,10 @@
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2083,7 +2083,7 @@
}
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
+ andq(rcx, Immediate(0x1f));
shl_cl(dst);
}
@@ -2172,7 +2172,7 @@
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
@@ -2183,10 +2183,10 @@
subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
movp(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
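
The xorp/andp/xorp sequence above is the classic branchless select. In scalar form (a sketch; Select is a hypothetical name, not V8 code):

  #include <stdint.h>

  // mask is either all ones or all zeros:
  //   mask == ~0ull -> returns b;  mask == 0 -> returns a
  static inline uint64_t Select(uint64_t a, uint64_t b, uint64_t mask) {
    return ((a ^ b) & mask) ^ a;
  }

Here a is src1, b is src2, and kScratchRegister holds all ones exactly when src1 is a smi, so dst always ends up with the non-smi operand.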
@@ -2263,7 +2263,7 @@
shr(dst, Immediate(kSmiShift));
// High bits.
shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ orp(dst, scratch);
}
@@ -2309,8 +2309,8 @@
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
@@ -2333,7 +2333,7 @@
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
@@ -3341,7 +3341,7 @@
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ andp(dst, kScratchRegister);
}
@@ -3842,7 +3842,7 @@
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -4068,7 +4068,7 @@
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
@@ -4293,7 +4293,7 @@
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
cmpp(object, top_operand);
@@ -4329,7 +4329,7 @@
// scratch1 = length * 2 + kObjectAlignmentMask.
leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
subp(scratch1, Immediate(kHeaderAlignment));
}
@@ -4367,7 +4367,7 @@
movl(scratch1, length);
ASSERT(kCharSize == 1);
addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
subp(scratch1, Immediate(kHeaderAlignment));
}
@@ -4720,7 +4720,7 @@
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- and_(rsp, Immediate(-frame_alignment));
+ andp(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize),
kScratchRegister);
}
@@ -4789,10 +4789,10 @@
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4811,7 +4811,7 @@
Move(scratch, map);
movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ andp(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4833,7 +4833,7 @@
// rcx = mask | (mask << 1).
leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
@@ -4868,19 +4868,19 @@
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
shl_cl(mask_reg);
}
@@ -4961,21 +4961,21 @@
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
imulp(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
@@ -5064,7 +5064,7 @@
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
+ andp(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.h Fri Mar 21 02:42:10 2014 UTC
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.h Wed Mar 26 12:15:35 2014 UTC
@@ -1027,7 +1027,7 @@
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
+ andp(reg, Immediate(mask));
shl(reg, Immediate(kSmiShift));
}
=======================================
--- /branches/bleeding_edge/src/x64/regexp-macro-assembler-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/regexp-macro-assembler-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -293,8 +293,8 @@
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
@@ -462,7 +462,7 @@
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +476,7 @@
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -490,7 +490,7 @@
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
__ leap(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -523,7 +523,7 @@
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
@@ -575,7 +575,7 @@
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +593,7 @@
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
=======================================
--- /branches/bleeding_edge/src/x64/stub-cache-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/stub-cache-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -89,7 +89,7 @@
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -195,10 +195,10 @@
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -206,11 +206,11 @@
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
=======================================
--- /branches/bleeding_edge/test/cctest/test-assembler-x64.cc Fri Mar 21 02:42:10 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-assembler-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -577,7 +577,7 @@
for (int i = 0; i < ELEMENT_COUNT; i++) {
__ movl(rax, Immediate(vec->Get(i)->Int32Value()));
__ shl(rax, Immediate(0x20));
- __ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
__ pushq(rax);
}
=======================================
--- /branches/bleeding_edge/test/cctest/test-disasm-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-disasm-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -57,10 +57,10 @@
// Short immediate instructions
__ addq(rax, Immediate(12345678));
- __ or_(rax, Immediate(12345678));
+ __ orq(rax, Immediate(12345678));
__ subq(rax, Immediate(12345678));
- __ xor_(rax, Immediate(12345678));
- __ and_(rax, Immediate(12345678));
+ __ xorq(rax, Immediate(12345678));
+ __ andq(rax, Immediate(12345678));
// ---- This one caused crash
__ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
@@ -93,15 +93,15 @@
__ addq(rbx, Immediate(12));
__ nop();
__ nop();
- __ and_(rdx, Immediate(3));
- __ and_(rdx, Operand(rsp, 4));
+ __ andq(rdx, Immediate(3));
+ __ andq(rdx, Operand(rsp, 4));
__ cmpq(rdx, Immediate(3));
__ cmpq(rdx, Operand(rsp, 4));
__ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
__ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
__ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
- __ or_(rdx, Immediate(3));
- __ xor_(rdx, Immediate(3));
+ __ orq(rdx, Immediate(3));
+ __ xorq(rdx, Immediate(3));
__ nop();
__ cpuid();
__ movsxbq(rdx, Operand(rcx, 0));
@@ -159,7 +159,7 @@
__ idivq(rdx);
__ mul(rdx);
__ negq(rdx);
- __ not_(rdx);
+ __ notq(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
__ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
@@ -174,8 +174,8 @@
// __ jmp(Operand(rbx, rcx, times_4, 10000));
__ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
- __ or_(rdx, Immediate(12345));
- __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ orq(rdx, Immediate(12345));
+ __ orq(rdx, Operand(rbx, rcx, times_4, 10000));
__ nop();
@@ -202,19 +202,19 @@
__ addq(rbx, Immediate(12));
__ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ and_(rbx, Immediate(12345));
+ __ andq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12));
__ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ cmpb(rax, Immediate(100));
- __ or_(rbx, Immediate(12345));
+ __ orq(rbx, Immediate(12345));
__ subq(rbx, Immediate(12));
__ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ xor_(rbx, Immediate(12345));
+ __ xorq(rbx, Immediate(12345));
__ imulq(rdx, rcx, Immediate(12));
__ imulq(rdx, rcx, Immediate(1000));
@@ -230,8 +230,8 @@
__ testb(Operand(rax, -20), Immediate(0x9A));
__ nop();
- __ xor_(rdx, Immediate(12345));
- __ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
+ __ xorq(rdx, Immediate(12345));
+ __ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
__ bts(Operand(rbx, rcx, times_8, 10000), rdx);
__ hlt();
__ int3();
=======================================
--- /branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc Wed Mar 26 11:17:53 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc Wed Mar 26 12:15:35 2014 UTC
@@ -181,7 +181,7 @@
TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -277,7 +277,7 @@
TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -380,7 +380,7 @@
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -450,7 +450,7 @@
TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -490,7 +490,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -501,7 +501,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -512,7 +512,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -523,7 +523,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -536,7 +536,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "zero" non-smi.
__ j(cond, &exit);
@@ -553,7 +553,7 @@
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Negative" non-smi.
__ j(cond, &exit);
@@ -564,7 +564,7 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi.
__ j(cond, &exit);
@@ -605,17 +605,17 @@
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
@@ -649,7 +649,7 @@
__ j(NegateCondition(cond), &exit);
// Success
- __ xor_(rax, rax);
+ __ xorq(rax, rax);
__ bind(&exit);
ExitCode(masm);
@@ -736,7 +736,7 @@
TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -961,7 +961,7 @@
SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1182,7 +1182,7 @@
SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
SmiSubOverflowTest(masm, &exit, 0x100, 0);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1269,7 +1269,7 @@
TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1383,7 +1383,7 @@
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ popq(r15);
@@ -1493,7 +1493,7 @@
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ popq(r15);
@@ -1573,7 +1573,7 @@
TestSmiIndex(masm, &exit, 0x40, 1000);
TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1590,7 +1590,7 @@
__ movl(rax, Immediate(id));
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1600,7 +1600,7 @@
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1611,8 +1611,8 @@
Label fail_ok;
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
@@ -1646,7 +1646,7 @@
TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1727,7 +1727,7 @@
TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1810,7 +1810,7 @@
TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1893,7 +1893,7 @@
TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1955,7 +1955,7 @@
TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNot(masm, &exit, 0x80, 0x05555555);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2050,7 +2050,7 @@
TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
TestSmiShiftLeft(masm, &exit, 0x190, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2156,7 +2156,7 @@
TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2225,7 +2225,7 @@
TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2291,7 +2291,7 @@
TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2796,7 +2796,7 @@
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
__ addq(rsp, Immediate(1 * kPointerSize));
ExitCode(masm);