Revision: 4725
Author: l...@chromium.org
Date: Wed May 26 03:02:07 2010
Log: X64: Make smi memory operations work directly on the embedded value.
Adds Operand-relative Operand constructor.
Review URL: http://codereview.chromium.org/2242002
http://code.google.com/p/v8/source/detail?r=4725
Modified:
/branches/bleeding_edge/src/x64/assembler-x64.cc
/branches/bleeding_edge/src/x64/assembler-x64.h
/branches/bleeding_edge/src/x64/macro-assembler-x64.cc
/branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.cc	Thu May 20 06:54:31 2010
+++ /branches/bleeding_edge/src/x64/assembler-x64.cc	Wed May 26 03:02:07 2010
@@ -239,6 +239,52 @@
}
+Operand::Operand(const Operand& operand, int32_t offset) {
+ ASSERT(operand.len_ >= 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.buf_[0];
+ ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+  bool is_baseless = (mode == 0) && (base_reg == 0x05);  // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+    disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+ }
+
+ // Write new operand with same registers, but with modified displacement.
+ ASSERT(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ rex_ = operand.rex_;
+ if (!is_int8(disp_value) || is_baseless) {
+    // Need 32 bits of displacement, mode 2 or mode 0 with register rbp/r13.
+ buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+ len_ = disp_offset + 4;
+ Memory::int32_at(&buf_[disp_offset]) = disp_value;
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
+ len_ = disp_offset + 1;
+ buf_[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ buf_[0] = (modrm & 0x3f); // Mode 0.
+ len_ = disp_offset;
+ }
+ if (has_sib) {
+ buf_[1] = operand.buf_[1];
+ }
+}
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h Wed May 19 01:16:52 2010
+++ /branches/bleeding_edge/src/x64/assembler-x64.h Wed May 26 03:02:07 2010
@@ -300,12 +300,16 @@
ScaleFactor scale,
int32_t disp);
+ // Offset from existing memory operand.
+ // Offset is added to existing displacement as 32-bit signed values and
+ // this must not overflow.
+ Operand(const Operand& base, int32_t offset);
+
private:
byte rex_;
byte buf_[10];
// The number of bytes in buf_.
unsigned int len_;
- RelocInfo::Mode rmode_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc	Wed May 26 02:24:44 2010
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc	Wed May 26 03:02:07 2010
@@ -603,7 +603,7 @@
}
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
cmpq(dst, src);
}
@@ -614,13 +614,7 @@
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (src->value() == 0) {
- // Only tagged long smi to have 32-bit representation.
- cmpq(dst, Immediate(0));
- } else {
- Move(kScratchRegister, src);
- cmpq(dst, kScratchRegister);
- }
+ cmpl(Operand(dst, kIntSize), Immediate(src->value()));
}
@@ -922,8 +916,7 @@
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addl(Operand(dst, kIntSize), Immediate(constant->value()));
}
}
@@ -1607,13 +1600,7 @@
void MacroAssembler::Test(const Operand& src, Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- testl(src, Immediate(static_cast<int32_t>(smi)));
- } else {
- Move(kScratchRegister, source);
- testq(src, kScratchRegister);
- }
+ testl(Operand(src, kIntSize), Immediate(source->value()));
}
=======================================
--- /branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc	Thu May  6 01:15:15 2010
+++ /branches/bleeding_edge/test/cctest/test-macro-assembler-x64.cc	Wed May 26 03:02:07 2010
@@ -61,6 +61,7 @@
using v8::internal::r13;
using v8::internal::r14;
using v8::internal::r15;
+using v8::internal::times_pointer_size;
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
using v8::internal::less_equal;
@@ -75,6 +76,8 @@
using v8::internal::Smi;
using v8::internal::kSmiTagMask;
using v8::internal::kSmiValueSize;
+using v8::internal::kPointerSize;
+using v8::internal::kIntSize;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -2053,4 +2056,358 @@
}
+TEST(OperandOffset) {
+ int data[256];
+ for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ __ push(r12);
+ __ push(r13);
+ __ push(rbx);
+ __ push(rbp);
+ __ push(Immediate(0x100)); // <-- rbp
+ __ movq(rbp, rsp);
+ __ push(Immediate(0x101));
+ __ push(Immediate(0x102));
+ __ push(Immediate(0x103));
+ __ push(Immediate(0x104));
+ __ push(Immediate(0x105)); // <-- rbx
+ __ push(Immediate(0x106));
+ __ push(Immediate(0x107));
+ __ push(Immediate(0x108));
+ __ push(Immediate(0x109)); // <-- rsp
+ // rbp = rsp[9]
+ // r12 = rsp[3]
+ // rbx = rsp[5]
+ // r13 = rsp[7]
+ __ lea(r12, Operand(rsp, 3 * kPointerSize));
+ __ lea(r13, Operand(rbp, -3 * kPointerSize));
+ __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+ __ movl(rcx, Immediate(2));
+ __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
+ __ movl(rax, Immediate(1));
+
+ Operand sp0 = Operand(rsp, 0);
+
+ // Test 1.
+ __ movl(rdx, sp0); // Sanity check.
+ __ cmpl(rdx, Immediate(0x109));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Test 2.
+ // Zero to non-zero displacement.
+ __ movl(rdx, Operand(sp0, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x107));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand sp2 = Operand(rsp, 2 * kPointerSize);
+
+ // Test 3.
+ __ movl(rdx, sp2); // Sanity check.
+ __ cmpl(rdx, Immediate(0x107));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(sp2, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x105));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Non-zero to zero displacement.
+ __ movl(rdx, Operand(sp2, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x109));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kPointerSize);
+
+ // Test 6.
+ __ movl(rdx, sp2c2); // Sanity check.
+ __ cmpl(rdx, Immediate(0x105));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(sp2c2, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x103));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Non-zero to zero displacement.
+ __ movl(rdx, Operand(sp2c2, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x107));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+
+ Operand bp0 = Operand(rbp, 0);
+
+ // Test 9.
+ __ movl(rdx, bp0); // Sanity check.
+ __ cmpl(rdx, Immediate(0x100));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Zero to non-zero displacement.
+ __ movl(rdx, Operand(bp0, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x102));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand bp2 = Operand(rbp, -2 * kPointerSize);
+
+ // Test 11.
+ __ movl(rdx, bp2); // Sanity check.
+ __ cmpl(rdx, Immediate(0x102));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Non-zero to zero displacement.
+ __ movl(rdx, Operand(bp2, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x100));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bp2, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x104));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand bp2c4 = Operand(rbp, rcx, times_pointer_size, -4 * kPointerSize);
+
+ // Test 14:
+ __ movl(rdx, bp2c4); // Sanity check.
+ __ cmpl(rdx, Immediate(0x102));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bp2c4, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x100));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bp2c4, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x104));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand bx0 = Operand(rbx, 0);
+
+ // Test 17.
+ __ movl(rdx, bx0); // Sanity check.
+ __ cmpl(rdx, Immediate(0x105));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bx0, 5 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x100));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bx0, -4 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x109));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand bx2 = Operand(rbx, 2 * kPointerSize);
+
+ // Test 20.
+ __ movl(rdx, bx2); // Sanity check.
+ __ cmpl(rdx, Immediate(0x103));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bx2, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x101));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Non-zero to zero displacement.
+ __ movl(rdx, Operand(bx2, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x105));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand bx2c2 = Operand(rbx, rcx, times_pointer_size, -2 * kPointerSize);
+
+ // Test 23.
+ __ movl(rdx, bx2c2); // Sanity check.
+ __ cmpl(rdx, Immediate(0x105));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bx2c2, 2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x103));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(bx2c2, -2 * kPointerSize));
+ __ cmpl(rdx, Immediate(0x107));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand r80 = Operand(r8, 0);
+
+ // Test 26.
+ __ movl(rdx, r80); // Sanity check.
+ __ cmpl(rdx, Immediate(0x80808080));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, -8 * kIntSize));
+ __ cmpl(rdx, Immediate(0x78787878));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, 8 * kIntSize));
+ __ cmpl(rdx, Immediate(0x88888888));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, -64 * kIntSize));
+ __ cmpl(rdx, Immediate(0x40404040));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, 64 * kIntSize));
+ __ cmpl(rdx, Immediate(0xC0C0C0C0));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ Operand r88 = Operand(r8, 8 * kIntSize);
+
+ // Test 31.
+ __ movl(rdx, r88); // Sanity check.
+ __ cmpl(rdx, Immediate(0x88888888));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r88, -8 * kIntSize));
+ __ cmpl(rdx, Immediate(0x80808080));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r88, 8 * kIntSize));
+ __ cmpl(rdx, Immediate(0x90909090));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r88, -64 * kIntSize));
+ __ cmpl(rdx, Immediate(0x48484848));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r88, 64 * kIntSize));
+ __ cmpl(rdx, Immediate(0xC8C8C8C8));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+
+ Operand r864 = Operand(r8, 64 * kIntSize);
+
+ // Test 36.
+ __ movl(rdx, r864); // Sanity check.
+ __ cmpl(rdx, Immediate(0xC0C0C0C0));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r864, -8 * kIntSize));
+ __ cmpl(rdx, Immediate(0xB8B8B8B8));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r864, 8 * kIntSize));
+ __ cmpl(rdx, Immediate(0xC8C8C8C8));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r864, -64 * kIntSize));
+ __ cmpl(rdx, Immediate(0x80808080));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r864, 32 * kIntSize));
+ __ cmpl(rdx, Immediate(0xE0E0E0E0));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // 32-bit offset to 8-bit offset.
+ __ movl(rdx, Operand(r864, -60 * kIntSize));
+ __ cmpl(rdx, Immediate(0x84848484));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r864, 60 * kIntSize));
+ __ cmpl(rdx, Immediate(0xFCFCFCFC));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Test unaligned offsets.
+
+ // Test 43.
+ __ movl(rdx, Operand(r80, 2));
+ __ cmpl(rdx, Immediate(0x81818080));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, -2));
+ __ cmpl(rdx, Immediate(0x80807F7F));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, 126));
+ __ cmpl(rdx, Immediate(0xA0A09F9F));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, -126));
+ __ cmpl(rdx, Immediate(0x61616060));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, 254));
+ __ cmpl(rdx, Immediate(0xC0C0BFBF));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ __ movl(rdx, Operand(r80, -254));
+ __ cmpl(rdx, Immediate(0x41414040));
+ __ j(not_equal, &exit);
+ __ incq(rax);
+
+ // Success.
+
+ __ movl(rax, Immediate(0));
+ __ bind(&exit);
+ __ lea(rsp, Operand(rbp, kPointerSize));
+ __ pop(rbp);
+ __ pop(rbx);
+ __ pop(r13);
+ __ pop(r12);
+ __ ret(0);
+
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+
#undef __
--
v8-dev mailing list
v8-dev@googlegroups.com
http://groups.google.com/group/v8-dev