Revision: 2607
Author: [email protected]
Date: Mon Aug  3 04:05:26 2009
Log: X64: Reenabled RSet.

Review URL: http://codereview.chromium.org/160453

http://code.google.com/p/v8/source/detail?r=2607

Modified:
  /branches/bleeding_edge/src/heap-inl.h
  /branches/bleeding_edge/src/heap.cc
  /branches/bleeding_edge/src/heap.h
  /branches/bleeding_edge/src/spaces-inl.h
  /branches/bleeding_edge/src/spaces.h
  /branches/bleeding_edge/src/x64/assembler-x64.h
  /branches/bleeding_edge/src/x64/ic-x64.cc
  /branches/bleeding_edge/src/x64/macro-assembler-x64.cc

=======================================
--- /branches/bleeding_edge/src/heap-inl.h      Wed Jul 22 03:01:25 2009
+++ /branches/bleeding_edge/src/heap-inl.h      Mon Aug  3 04:05:26 2009
@@ -159,9 +159,7 @@
    if (new_space_.Contains(address)) return;
    ASSERT(!new_space_.FromSpaceContains(address));
    SLOW_ASSERT(Contains(address + offset));
-#ifndef V8_HOST_ARCH_64_BIT
    Page::SetRSet(address, offset);
-#endif  // V8_HOST_ARCH_64_BIT
  }
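
Dropping the guard means the 64-bit build again maintains remembered-set bits on stores. For orientation, here is a minimal sketch of the computation Page::SetRSet performs, assembled from the rset layout comments in spaces-inl.h below (the helper name and the direct memory write are simplifications, not the actual V8 code):

  // Sketch: set the remembered-set bit for the pointer slot at
  // 'address + offset'; one bit per pointer-size word of page offset.
  void SetRSetBitSketch(Address address, int offset) {
    Page* page = Page::FromAddress(address);
    uint32_t bit_offset = (page->Offset(address) + offset) >> kPointerSizeLog2;
    uint32_t bitmask = 1 << (bit_offset % kBitsPerInt);
    // The rset is read and written as 32-bit words, even on x64.
    Address rset_address = page->address() + Page::kRSetOffset +
                           (bit_offset / kBitsPerInt) * kIntSize;
    *reinterpret_cast<uint32_t*>(rset_address) |= bitmask;
  }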


=======================================
--- /branches/bleeding_edge/src/heap.cc Thu Jul 30 02:13:48 2009
+++ /branches/bleeding_edge/src/heap.cc Mon Aug  3 04:05:26 2009
@@ -681,33 +681,11 @@
    // Copy objects reachable from weak pointers.
    GlobalHandles::IterateWeakRoots(&scavenge_visitor);

-#ifdef V8_HOST_ARCH_64_BIT
-  // TODO(X64): Make this go away again. We currently disable RSets for
-  // 64-bit-mode.
-  HeapObjectIterator old_pointer_iterator(old_pointer_space_);
-  while (old_pointer_iterator.has_next()) {
-    HeapObject* heap_object = old_pointer_iterator.next();
-    heap_object->Iterate(&scavenge_visitor);
-  }
-  HeapObjectIterator map_iterator(map_space_);
-  while (map_iterator.has_next()) {
-    HeapObject* heap_object = map_iterator.next();
-    heap_object->Iterate(&scavenge_visitor);
-  }
-  LargeObjectIterator lo_iterator(lo_space_);
-  while (lo_iterator.has_next()) {
-    HeapObject* heap_object = lo_iterator.next();
-    if (heap_object->IsFixedArray()) {
-      heap_object->Iterate(&scavenge_visitor);
-    }
-  }
-#else  // !defined(V8_HOST_ARCH_64_BIT)
    // Copy objects reachable from the old generation.  By definition,
    // there are no intergenerational pointers in code or data spaces.
    IterateRSet(old_pointer_space_, &ScavengePointer);
    IterateRSet(map_space_, &ScavengePointer);
    lo_space_->IterateRSet(&ScavengePointer);
-#endif

    // Copy objects reachable from cells by scavenging cell values directly.
    HeapObjectIterator cell_iterator(cell_space_);
@@ -830,13 +808,11 @@


  int Heap::UpdateRSet(HeapObject* obj) {
-#ifndef V8_HOST_ARCH_64_BIT
-  // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
    ASSERT(!InNewSpace(obj));
    // Special handling of fixed arrays to iterate the body based on the start
    // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
    // will not work because Page::SetRSet needs to have the start of the
-  // object.
+  // object for large object pages.
    if (obj->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(obj);
      int length = array->length();
@@ -853,7 +829,6 @@
      UpdateRSetVisitor v;
      obj->Iterate(&v);
    }
-#endif  // V8_HOST_ARCH_64_BIT
    return obj->Size();
  }
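
The comment change above is the point of this hunk: on large object pages the extra remembered set is located relative to the object's start, so the rset bit must be set from a (start address, offset) pair rather than from the slot address alone. A hedged reconstruction of the loop the hunk elides (a sketch, not the committed code):

  // Sketch of the FixedArray branch of Heap::UpdateRSet.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      if (Heap::InNewSpace(array->get(i))) {
        // Start of the object plus offset, as the comment requires.
        Page::SetRSet(obj->address(), offset);
      }
    }
  }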

=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Jul 29 05:34:21 2009
+++ /branches/bleeding_edge/src/heap.h  Mon Aug  3 04:05:26 2009
@@ -257,7 +257,7 @@
    // address with the mask will result in the start address of the new space
    // for all addresses in either semispace.
    static Address NewSpaceStart() { return new_space_.start(); }
-  static uint32_t NewSpaceMask() { return new_space_.mask(); }
+  static uintptr_t NewSpaceMask() { return new_space_.mask(); }
    static Address NewSpaceTop() { return new_space_.top(); }

    static NewSpace* new_space() { return &new_space_; }
@@ -1123,11 +1123,9 @@
          HeapObject* object = HeapObject::cast(*current);
          ASSERT(Heap::Contains(object));
          ASSERT(object->map()->IsMap());
-#ifndef V8_TARGET_ARCH_X64
          if (Heap::InNewSpace(object)) {
            ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
          }
-#endif
        }
      }
    }
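
The uint32_t-to-uintptr_t change in this file (and the matching one in spaces.h) is not cosmetic: Heap::InNewSpace ANDs a full 64-bit address against this mask. A short note on why (a sketch; member names follow the accessors above):

  // The mask is chosen so that, for any address in either semispace,
  //   (addr & address_mask_) == new_space_.start().
  // If mask() returned uint32_t, the AND would zero bits 32-63 of a 64-bit
  // address, and the comparison against a 64-bit start address could never
  // succeed for heaps reserved above 4 GB.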
=======================================
--- /branches/bleeding_edge/src/spaces-inl.h    Thu Jul  2 00:48:05 2009
+++ /branches/bleeding_edge/src/spaces-inl.h    Mon Aug  3 04:05:26 2009
@@ -103,9 +103,9 @@
  // The address of the rset word containing the bit for this word is computed as:
  //    page_address + words * 4
  // For a 64-bit address, if it is:
-// | page address | quadwords(5) | bit offset(5) | pointer alignment (3) |
+// | page address | words(5) | bit offset(5) | pointer alignment (3) |
  // The address of the rset word containing the bit for this word is computed as:
-//    page_address + quadwords * 4 + kRSetOffset.
+//    page_address + words * 4 + kRSetOffset.
  // The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
  // even on the X64 architecture.

@@ -115,7 +115,7 @@

    Page* page = Page::FromAddress(address);
    uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
-                                             kObjectAlignmentBits);
+                                             kPointerSizeLog2);
    *bitmask = 1 << (bit_offset % kBitsPerInt);

    Address rset_address =
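
A worked example of the 64-bit lookup described above (hypothetical numbers; 8K pages and 8-byte pointers assumed):

  // Pointer slot at page offset 0x1238:
  //   bit_offset = 0x1238 >> kPointerSizeLog2 (= 3)  ->  583
  //   bitmask    = 1 << (583 % 32)                   ->  1 << 7
  //   rset word  = page_address + kRSetOffset + (583 / 32) * 4
  //              = page_address + kRSetOffset + 72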
=======================================
--- /branches/bleeding_edge/src/spaces.h        Wed Jul 29 01:10:19 2009
+++ /branches/bleeding_edge/src/spaces.h        Mon Aug  3 04:05:26 2009
@@ -99,8 +99,11 @@
  // its page offset by 32. Therefore, the object area in a page starts at the
  // 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
  // the first two words (64 bits) in a page can be used for other purposes.
-// TODO(X64): This description only represents the 32-bit layout.
-// On the 64-bit platform, we add an offset to the start of the remembered set.
+//
+// On the 64-bit platform, we add an offset to the start of the remembered set,
+// and pointers are aligned to 8-byte pointer size. This means that we need
+// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
+// For this reason we add an offset to get room for the Page data at the start.
  //
  // The mark-compact collector transforms a map pointer into a page index and a
  // page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
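
The sizes in the new paragraph can be checked back-of-envelope (using the page and pointer sizes the comment assumes):

  // 8K page, one rset bit per 8-byte pointer slot:
  //   8192 / 8 = 1024 bits = 128 bytes of rset.
  // Rset bits that cover the rset itself:
  //   128 / 8 = 16 bits = 2 bytes free (vs. 8 bytes free on 32-bit),
  // too little for the Page header fields, hence kRSetOffset below.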
@@ -118,7 +121,7 @@
    // from [page_addr .. page_addr + kPageSize[
    //
    // Note that this function only works for addresses in normal paged
-  // spaces and addresses in the first 8K of large object pages (ie,
+  // spaces and addresses in the first 8K of large object pages (i.e.,
    // the start of large objects but not necessarily derived pointers
    // within them).
    INLINE(static Page* FromAddress(Address a)) {
@@ -218,7 +221,7 @@
    // Page size mask.
    static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

-  // The offset of the remembered set in a page, in addition to the empty words
+  // The offset of the remembered set in a page, in addition to the empty bytes
    // formed as the remembered bits of the remembered set itself.
  #ifdef V8_TARGET_ARCH_X64
  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
@@ -234,7 +237,7 @@
    // to align start of rset to a uint32_t address.
    static const int kObjectStartOffset = 256;

-  // The start offset of the remembered set in a page.
+  // The start offset of the used part of the remembered set in a page.
    static const int kRSetStartOffset = kRSetOffset +
        kObjectStartOffset / kBitsPerPointer;
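
Plugging in the constants gives the concrete offsets (assuming kRSetOffset is 0 on 32-bit targets, per the #ifdef above):

  //   ia32: kRSetStartOffset = 0  + 256 / 32 = 8
  //   x64:  kRSetStartOffset = 32 + 256 / 64 = 36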

@@ -264,16 +267,16 @@
    // low-order bit for large object pages will be cleared.
    int is_normal_page;

-  // The following fields overlap with remembered set, they can only
+  // The following fields may overlap with remembered set, they can only
    // be used in the mark-compact collector when remembered set is not
    // used.

-  // The allocation pointer after relocating objects to this page.
-  Address mc_relocation_top;
-
    // The index of the page in its owner space.
    int mc_page_index;

+  // The allocation pointer after relocating objects to this page.
+  Address mc_relocation_top;
+
    // The forwarding address of the first live object in this page.
    Address mc_first_forwarded;

@@ -1165,7 +1168,7 @@
    // The start address of the space and a bit mask. Anding an address in the
    // new space with the mask will result in the start address.
    Address start() { return start_; }
-  uint32_t mask() { return address_mask_; }
+  uintptr_t mask() { return address_mask_; }

    // The allocation top and limit addresses.
    Address* allocation_top_address() { return &allocation_info_.top; }
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h     Thu Jul 30 02:18:14 2009
+++ /branches/bleeding_edge/src/x64/assembler-x64.h     Mon Aug  3 04:05:26 2009
@@ -44,14 +44,24 @@

  // Test whether a 64-bit value is in a specific range.
  static inline bool is_uint32(int64_t x) {
-  const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+  static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
    return x == (x & kUInt32Mask);
  }

  static inline bool is_int32(int64_t x) {
-  const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+  static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
    return is_uint32(x - kMinIntValue);
  }
+
+static inline bool uint_is_int32(uint64_t x) {
+  static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
+  return x < kMaxIntValue;
+}
+
+static inline bool is_uint32(uint64_t x) {
+  static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
+  return x < kMaxUIntValue;
+}

  // CPU Registers.
  //
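
A quick sanity check of the new range tests (a sketch, not part of the change; ASSERT and the V8_*64_C macros are the ones V8 uses elsewhere):

  // is_int32 works by range translation: x - kMinIntValue maps the signed
  // range [-2^31, 2^31) onto [0, 2^32), which is_uint32 then tests.
  ASSERT(is_int32(V8_INT64_C(-1)));            // -1 + 2^31 = 0x7fffffff
  ASSERT(!is_int32(V8_INT64_C(0x80000000)));   // 2^31 is one past the range
  ASSERT(is_uint32(V8_UINT64_C(0xffffffff)));  // largest uint32
  ASSERT(!uint_is_int32(V8_UINT64_C(0x80000000)));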
=======================================
--- /branches/bleeding_edge/src/x64/ic-x64.cc   Mon Aug  3 03:53:45 2009
+++ /branches/bleeding_edge/src/x64/ic-x64.cc   Mon Aug  3 04:05:26 2009
@@ -505,7 +505,8 @@
    // rax: value
    // rcx: FixedArray
    // rbx: index (as a smi)
-  __ movq(Operand(rcx, rbx, times_4, FixedArray::kHeaderSize - kHeapObjectTag),
+  __ movq(Operand(rcx, rbx, times_half_pointer_size,
+                  FixedArray::kHeaderSize - kHeapObjectTag),
           rax);
    // Update write barrier for the elements array address.
    __ movq(rdx, rax);
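
The scale-factor substitution should be behavior-preserving on x64 (times_half_pointer_size resolving to times_4 there) while documenting the smi arithmetic. A worked check, assuming a smi carries its value shifted left by one at this revision:

  //   index_smi    = value << 1
  //   displacement = index_smi * 4 = value * 8 = value * kPointerSize,
  // i.e. the byte offset of element 'value' in an array of 8-byte
  // pointers. On ia32 the same identity holds with times_2 and 4-byte
  // pointers.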
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc      Thu Jul 30 02:18:14 2009
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc      Mon Aug  3 04:05:26 2009
@@ -31,6 +31,7 @@
  #include "codegen-inl.h"
  #include "assembler-x64.h"
  #include "macro-assembler-x64.h"
+#include "serialize.h"
  #include "debug.h"

  namespace v8 {
@@ -45,11 +46,156 @@
  }


-// TODO(x64): For now, the write barrier is disabled on x64 and we
-// therefore generate no code.  This should be fixed when the write
-// barrier is enabled.
-void MacroAssembler::RecordWrite(Register object, int offset,
-                                 Register value, Register scratch) {
+
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page address from the heap object pointer, leave it
+  // in 'object'.
+  ASSERT(is_int32(~Page::kPageAlignmentMask));
+  masm->and_(object,
+             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+
+  // Compute the bit addr in the remembered set, leave it in "addr".
+  masm->subq(addr, object);
+  masm->shr(addr, Immediate(kPointerSizeLog2));
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmpq(addr, Immediate(Page::kPageSize / kPointerSize));
+  masm->j(less, &fast);
+
+  // Adjust 'addr' to be relative to the start of the extra remembered set
+  // and the page address in 'object' to be the address of the extra
+  // remembered set.
+  masm->subq(addr, Immediate(Page::kPageSize / kPointerSize));
+  // Load the array length into 'scratch'.
+  masm->movl(scratch,
+             Operand(object,
+                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
+  // Extra remembered set starts right after FixedArray.
+  // Add the page header, array header, and array body size
+  // (length * pointer size) to the page address to find the extra remembered
+  // set start.
+  masm->lea(object,
+            Operand(object, scratch, times_pointer_size,
+                    Page::kObjectStartOffset + FixedArray::kHeaderSize));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions.
+  masm->bind(&fast);
+  masm->bts(Operand(object, Page::kRSetOffset), addr);
+}
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits of three registers (object, address and
+  // scratch) OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+                                 int offset,
+                                 Register value,
+                                 Register scratch) {
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into young gen (which does not have space
+  // for the remembered set bits).
+  Label done;
+
+  // Test that the object address is not in the new space.  We cannot
+  // set remembered set bits in the new space.
+  movq(value, object);
+  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+  movq(kScratchRegister, ExternalReference::new_space_start());
+  cmpq(value, kScratchRegister);
+  j(equal, &done);
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    lea(value, Operand(object, offset));
+    ASSERT(is_int32(Page::kPageAlignmentMask));
+    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(value, Immediate(kObjectAlignmentBits));
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object' (immediate value is sign extended).
+    and_(object, Immediate(~Page::kPageAlignmentMask));
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions.
+    bts(Operand(object, Page::kRSetOffset), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 4 to get an offset
+      // into an array of words.
+      lea(dst, Operand(object, dst, times_half_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
  }
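
For reference, a worked example of the stub's 12-bit minor key encoding defined above (register codes illustrative: rax = 0, rcx = 1, rdx = 2):

  //   object = rdx (2), addr = rcx (1), scratch = rax (0)
  //   MinorKey = (2 << 8) | (1 << 4) | 0 = 0x210   // OOOOAAAASSSS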


