Author: [EMAIL PROTECTED]
Date: Fri Oct 17 02:13:27 2008
New Revision: 517

Modified:
    branches/bleeding_edge/src/globals.h
    branches/bleeding_edge/src/heap-inl.h
    branches/bleeding_edge/src/heap.cc
    branches/bleeding_edge/src/heap.h
    branches/bleeding_edge/src/spaces-inl.h
    branches/bleeding_edge/src/spaces.cc
    branches/bleeding_edge/src/spaces.h
    branches/bleeding_edge/test/cctest/test-spaces.cc

Log:
- Removed a few indirections by making the two SemiSpaces
   part of NewSpace and made NewSpace statically allocated.
- Eliminated indirection in MigrateObject.

Review URL: http://codereview.chromium.org/7619

Modified: branches/bleeding_edge/src/globals.h
==============================================================================
--- branches/bleeding_edge/src/globals.h        (original)
+++ branches/bleeding_edge/src/globals.h        Fri Oct 17 02:13:27 2008
@@ -375,7 +375,6 @@
  #define OBJECT_SIZE_ALIGN(value)                                \
    ((value + kObjectAlignmentMask) & ~kObjectAlignmentMask)

-
  // The expression OFFSET_OF(type, field) computes the byte-offset
  // of the specified field relative to the containing type. This
  // corresponds to 'offsetof' (in stddef.h), except that it doesn't

Modified: branches/bleeding_edge/src/heap-inl.h
==============================================================================
--- branches/bleeding_edge/src/heap-inl.h       (original)
+++ branches/bleeding_edge/src/heap-inl.h       Fri Oct 17 02:13:27 2008
@@ -51,7 +51,7 @@
    Counters::objs_since_last_young.Increment();
  #endif
    if (NEW_SPACE == space) {
-    return new_space_->AllocateRaw(size_in_bytes);
+    return new_space_.AllocateRaw(size_in_bytes);
    }

    Object* result;
@@ -100,17 +100,17 @@


  bool Heap::InNewSpace(Object* object) {
-  return new_space_->Contains(object);
+  return new_space_.Contains(object);
  }


  bool Heap::InFromSpace(Object* object) {
-  return new_space_->FromSpaceContains(object);
+  return new_space_.FromSpaceContains(object);
  }


  bool Heap::InToSpace(Object* object) {
-  return new_space_->ToSpaceContains(object);
+  return new_space_.ToSpaceContains(object);
  }


@@ -118,14 +118,14 @@
    // An object should be promoted if:
    // - the object has survived a scavenge operation or
    // - to space is already 25% full.
-  return old_address < new_space_->age_mark()
-      || (new_space_->Size() + object_size) >= (new_space_->Capacity() >> 2);
+  return old_address < new_space_.age_mark()
+      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
  }


  void Heap::RecordWrite(Address address, int offset) {
-  if (new_space_->Contains(address)) return;
-  ASSERT(!new_space_->FromSpaceContains(address));
+  if (new_space_.Contains(address)) return;
+  ASSERT(!new_space_.FromSpaceContains(address));
    SLOW_ASSERT(Contains(address + offset));
    Page::SetRSet(address, offset);
  }

Modified: branches/bleeding_edge/src/heap.cc
==============================================================================
--- branches/bleeding_edge/src/heap.cc  (original)
+++ branches/bleeding_edge/src/heap.cc  Fri Oct 17 02:13:27 2008
@@ -57,8 +57,7 @@
    SYMBOL_LIST(SYMBOL_ALLOCATION)
  #undef SYMBOL_ALLOCATION

-
-NewSpace* Heap::new_space_ = NULL;
+NewSpace Heap::new_space_;
  OldSpace* Heap::old_pointer_space_ = NULL;
  OldSpace* Heap::old_data_space_ = NULL;
  OldSpace* Heap::code_space_ = NULL;
@@ -103,7 +102,7 @@
  int Heap::Capacity() {
    if (!HasBeenSetup()) return 0;

-  return new_space_->Capacity() +
+  return new_space_.Capacity() +
        old_pointer_space_->Capacity() +
        old_data_space_->Capacity() +
        code_space_->Capacity() +
@@ -114,7 +113,7 @@
  int Heap::Available() {
    if (!HasBeenSetup()) return 0;

-  return new_space_->Available() +
+  return new_space_.Available() +
        old_pointer_space_->Available() +
        old_data_space_->Available() +
        code_space_->Available() +
@@ -123,8 +122,7 @@


  bool Heap::HasBeenSetup() {
-  return new_space_ != NULL &&
-         old_pointer_space_ != NULL &&
+  return old_pointer_space_ != NULL &&
           old_data_space_ != NULL &&
           code_space_ != NULL &&
           map_space_ != NULL &&
@@ -161,7 +159,7 @@
    // and does not count available bytes already in the old space or code
    // space.  Undercounting is safe---we may get an unrequested full GC when
    // a scavenge would have succeeded.
-  if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
+  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
      Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
      return MARK_COMPACTOR;
    }
@@ -179,24 +177,24 @@
    // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
    // following logic is used to avoid double logging.
  #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
    if (FLAG_heap_stats) {
      ReportHeapStatistics("Before GC");
    } else if (FLAG_log_gc) {
-    new_space_->ReportStatistics();
+    new_space_.ReportStatistics();
    }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
  #elif defined(DEBUG)
    if (FLAG_heap_stats) {
-    new_space_->CollectStatistics();
+    new_space_.CollectStatistics();
      ReportHeapStatistics("Before GC");
-    new_space_->ClearHistograms();
+    new_space_.ClearHistograms();
    }
  #elif defined(ENABLE_LOGGING_AND_PROFILING)
    if (FLAG_log_gc) {
-    new_space_->CollectStatistics();
-    new_space_->ReportStatistics();
-    new_space_->ClearHistograms();
+    new_space_.CollectStatistics();
+    new_space_.ReportStatistics();
+    new_space_.ClearHistograms();
    }
  #endif
  }
@@ -211,12 +209,12 @@
    if (FLAG_heap_stats) {
      ReportHeapStatistics("After GC");
    } else if (FLAG_log_gc) {
-    new_space_->ReportStatistics();
+    new_space_.ReportStatistics();
    }
  #elif defined(DEBUG)
    if (FLAG_heap_stats) ReportHeapStatistics("After GC");
  #elif defined(ENABLE_LOGGING_AND_PROFILING)
-  if (FLAG_log_gc) new_space_->ReportStatistics();
+  if (FLAG_log_gc) new_space_.ReportStatistics();
  #endif
  }
  #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -329,7 +327,7 @@

    switch (space) {
      case NEW_SPACE:
-      return new_space_->Available() >= requested_size;
+      return new_space_.Available() >= requested_size;
      case OLD_POINTER_SPACE:
        return old_pointer_space_->Available() >= requested_size;
      case OLD_DATA_SPACE:
@@ -461,7 +459,7 @@

   private:
    void CopyObject(Object** p) {
-    if (!Heap::InFromSpace(*p)) return;
+    if (!Heap::InNewSpace(*p)) return;
      Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
    }
  };
@@ -510,21 +508,21 @@
    LOG(ResourceEvent("scavenge", "begin"));

    scavenge_count_++;
-  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
+  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
        scavenge_count_ > new_space_growth_limit_) {
      // Double the size of the new space, and double the limit.  The next
    // doubling attempt will occur after the current new_space_growth_limit_
      // more collections.
      // TODO(1240712): NewSpace::Double has a return value which is
      // ignored here.
-    new_space_->Double();
+    new_space_.Double();
      new_space_growth_limit_ *= 2;
    }

    // Flip the semispaces.  After flipping, to space is empty, from space has
    // live objects.
-  new_space_->Flip();
-  new_space_->ResetAllocationInfo();
+  new_space_.Flip();
+  new_space_.ResetAllocationInfo();

    // We need to sweep newly copied objects which can be in either the to space
    // or the old space.  For to space objects, we use a mark.  Newly copied
@@ -540,9 +538,9 @@
    // in size.  Using the new space to record promoted addresses makes the
    // scavenge collector agnostic to the allocation strategy (eg, linear or
    // free-list) used in old space.
-  Address new_mark = new_space_->ToSpaceLow();
-  Address promoted_mark = new_space_->ToSpaceHigh();
-  promoted_top = new_space_->ToSpaceHigh();
+  Address new_mark = new_space_.ToSpaceLow();
+  Address promoted_mark = new_space_.ToSpaceHigh();
+  promoted_top = new_space_.ToSpaceHigh();

    CopyVisitor copy_visitor;
    // Copy roots.
@@ -557,15 +555,15 @@
    bool has_processed_weak_pointers = false;

    while (true) {
-    ASSERT(new_mark <= new_space_->top());
+    ASSERT(new_mark <= new_space_.top());
      ASSERT(promoted_mark >= promoted_top);

      // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
+    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
        // Sweep newly copied objects in the to space.  The allocation pointer
        // can change during sweeping.
-      Address previous_top = new_space_->top();
-      SemiSpaceIterator new_it(new_space_, new_mark);
+      Address previous_top = new_space_.top();
+      SemiSpaceIterator new_it(new_space(), new_mark);
        while (new_it.has_next()) {
          new_it.next()->Iterate(&copy_visitor);
        }
@@ -591,7 +589,7 @@
    }

    // Set age mark.
-  new_space_->set_age_mark(new_mark);
+  new_space_.set_age_mark(new_mark);

    LOG(ResourceEvent("scavenge", "end"));

@@ -718,20 +716,20 @@
    should_record = should_record || FLAG_log_gc;
  #endif
    if (should_record) {
-    if (new_space_->Contains(obj)) {
-      new_space_->RecordAllocation(obj);
+    if (new_space_.Contains(obj)) {
+      new_space_.RecordAllocation(obj);
      } else {
-      new_space_->RecordPromotion(obj);
+      new_space_.RecordPromotion(obj);
      }
    }
  }
  #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


-HeapObject* Heap::MigrateObject(HeapObject** source_p,
+HeapObject* Heap::MigrateObject(HeapObject* source,
                                  HeapObject* target,
                                  int size) {
-  void** src = reinterpret_cast<void**>((*source_p)->address());
+  void** src = reinterpret_cast<void**>(source->address());
    void** dst = reinterpret_cast<void**>(target->address());

    // Use block copying memcpy if the object we're migrating is big
@@ -749,7 +747,7 @@
    }

    // Set the forwarding address.
-  (*source_p)->set_map_word(MapWord::FromForwardingAddress(target));
+  source->set_map_word(MapWord::FromForwardingAddress(target));

    // Update NewSpace stats if necessary.
  #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -789,7 +787,7 @@
      *p = object;
      // After patching *p we have to repeat the checks that object is in the
      // active semispace of the young generation and not already copied.
-    if (!InFromSpace(object)) return;
+    if (!InNewSpace(object)) return;
      first_word = object->map_word();
      if (first_word.IsForwardingAddress()) {
        *p = first_word.ToForwardingAddress();
@@ -808,7 +806,7 @@
      result = target_space->AllocateRaw(object_size);

      if (!result->IsFailure()) {
-      *p = MigrateObject(p, HeapObject::cast(result), object_size);
+      *p = MigrateObject(object, HeapObject::cast(result), object_size);
        if (target_space == Heap::old_pointer_space_) {
          // Record the object's address at the top of the to space, to allow
          // it to be swept by the scavenger.
@@ -827,10 +825,10 @@
    }

    // The object should remain in new space or the old space allocation failed.
-  result = new_space_->AllocateRaw(object_size);
+  result = new_space_.AllocateRaw(object_size);
    // Failed allocation at this point is utterly unexpected.
    ASSERT(!result->IsFailure());
-  *p = MigrateObject(p, HeapObject::cast(result), object_size);
+  *p = MigrateObject(object, HeapObject::cast(result), object_size);
  }


@@ -1030,7 +1028,7 @@
    // allocation in new space.
    STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
    ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-  Object* result = new_space_->AllocateRaw(HeapNumber::kSize);
+  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (result->IsFailure()) return result;
    HeapObject::cast(result)->set_map(heap_number_map());
    HeapNumber::cast(result)->set_value(value);
@@ -2191,7 +2189,7 @@
    PrintF("Heap statistics : ");
    MemoryAllocator::ReportStatistics();
    PrintF("To space : ");
-  new_space_->ReportStatistics();
+  new_space_.ReportStatistics();
    PrintF("Old pointer space : ");
    old_pointer_space_->ReportStatistics();
    PrintF("Old data space : ");
@@ -2215,7 +2213,7 @@
  bool Heap::Contains(Address addr) {
    if (OS::IsOutsideAllocatedSpace(addr)) return false;
    return HasBeenSetup() &&
-    (new_space_->ToSpaceContains(addr) ||
+    (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
@@ -2235,7 +2233,7 @@

    switch (space) {
      case NEW_SPACE:
-      return new_space_->ToSpaceContains(addr);
+      return new_space_.ToSpaceContains(addr);
      case OLD_POINTER_SPACE:
        return old_pointer_space_->Contains(addr);
      case OLD_DATA_SPACE:
@@ -2303,8 +2301,8 @@
  #ifdef DEBUG
  void Heap::ZapFromSpace() {
    ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
-  for (Address a = new_space_->FromSpaceLow();
-       a < new_space_->FromSpaceHigh();
+  for (Address a = new_space_.FromSpaceLow();
+       a < new_space_.FromSpaceHigh();
         a += kPointerSize) {
      Memory::Address_at(a) = kFromSpaceZapValue;
    }
@@ -2322,29 +2320,21 @@
    // Loop over all the pointers in [object_start, object_end).
    while (object_address < object_end) {
      uint32_t rset_word = Memory::uint32_at(rset_address);
-
      if (rset_word != 0) {
-      // Bits were set.
        uint32_t result_rset = rset_word;
-
-      // Loop over all the bits in the remembered set word.  Though
-      // remembered sets are sparse, faster (eg, binary) search for
-      // set bits does not seem to help much here.
-      for (int bit_offset = 0; bit_offset < kBitsPerInt; bit_offset++) {
-        uint32_t bitmask = 1 << bit_offset;
+      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
          // Do not dereference pointers at or past object_end.
          if ((rset_word & bitmask) != 0 && object_address < object_end) {
            Object** object_p = reinterpret_cast<Object**>(object_address);
-          if (Heap::InFromSpace(*object_p)) {
+          if (Heap::InNewSpace(*object_p)) {
              copy_object_func(reinterpret_cast<HeapObject**>(object_p));
            }
            // If this pointer does not need to be remembered anymore, clear
            // the remembered set bit.
-          if (!Heap::InToSpace(*object_p)) result_rset &= ~bitmask;
+          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
          }
          object_address += kPointerSize;
        }
-
        // Update the remembered set if it has changed.
        if (result_rset != rset_word) {
          Memory::uint32_at(rset_address) = result_rset;
@@ -2353,7 +2343,6 @@
        // No bits in the word were set.  This is the common case.
        object_address += kPointerSize * kBitsPerInt;
      }
-
      rset_address += kIntSize;
    }
  }
@@ -2517,11 +2506,7 @@
    int old_space_size = young_generation_size_ - code_space_size;

    // Initialize new space.
-  new_space_ = new NewSpace(initial_semispace_size_,
-                            semispace_size_,
-                            NEW_SPACE);
-  if (new_space_ == NULL) return false;
-  if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
+  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;

    // Initialize old space, set the maximum capacity to the old generation
    // size. It will not contain code.
@@ -2579,11 +2564,7 @@
  void Heap::TearDown() {
    GlobalHandles::TearDown();

-  if (new_space_ != NULL) {
-    new_space_->TearDown();
-    delete new_space_;
-    new_space_ = NULL;
-  }
+  new_space_.TearDown();

    if (old_pointer_space_ != NULL) {
      old_pointer_space_->TearDown();

Modified: branches/bleeding_edge/src/heap.h
==============================================================================
--- branches/bleeding_edge/src/heap.h   (original)
+++ branches/bleeding_edge/src/heap.h   Fri Oct 17 02:13:27 2008
@@ -244,11 +244,11 @@
    // Return the starting address and a mask for the new space.   
And-masking an
    // address with the mask will result in the start address of the new  
space
    // for all addresses in either semispace.
-  static Address NewSpaceStart() { return new_space_->start(); }
-  static uint32_t NewSpaceMask() { return new_space_->mask(); }
-  static Address NewSpaceTop() { return new_space_->top(); }
+  static Address NewSpaceStart() { return new_space_.start(); }
+  static uint32_t NewSpaceMask() { return new_space_.mask(); }
+  static Address NewSpaceTop() { return new_space_.top(); }

-  static NewSpace* new_space() { return new_space_; }
+  static NewSpace* new_space() { return &new_space_; }
    static OldSpace* old_pointer_space() { return old_pointer_space_; }
    static OldSpace* old_data_space() { return old_data_space_; }
    static OldSpace* code_space() { return code_space_; }
@@ -256,10 +256,10 @@
    static LargeObjectSpace* lo_space() { return lo_space_; }

    static Address* NewSpaceAllocationTopAddress() {
-    return new_space_->allocation_top_address();
+    return new_space_.allocation_top_address();
    }
    static Address* NewSpaceAllocationLimitAddress() {
-    return new_space_->allocation_limit_address();
+    return new_space_.allocation_limit_address();
    }

    // Allocates and initializes a new JavaScript object based on a
@@ -727,7 +727,7 @@

    static const int kMaxMapSpaceSize = 8*MB;

-  static NewSpace* new_space_;
+  static NewSpace new_space_;
    static OldSpace* old_pointer_space_;
    static OldSpace* old_data_space_;
    static OldSpace* code_space_;
@@ -839,7 +839,7 @@
    // Helper function used by CopyObject to copy a source object to an
    // allocated target object and update the forwarding pointer in the source
    // object.  Returns the target object.
-  static HeapObject* MigrateObject(HeapObject** source_p,
+  static HeapObject* MigrateObject(HeapObject* source,
                                     HeapObject* target,
                                     int size);


Modified: branches/bleeding_edge/src/spaces-inl.h
==============================================================================
--- branches/bleeding_edge/src/spaces-inl.h     (original)
+++ branches/bleeding_edge/src/spaces-inl.h     Fri Oct 17 02:13:27 2008
@@ -308,7 +308,7 @@
    alloc_info->top = new_top;
  #ifdef DEBUG
    SemiSpace* space =
-      (alloc_info == &allocation_info_) ? to_space_ : from_space_;
+      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
    ASSERT(space->low() <= alloc_info->top
           && alloc_info->top <= space->high()
           && alloc_info->limit == space->high());

Modified: branches/bleeding_edge/src/spaces.cc
==============================================================================
--- branches/bleeding_edge/src/spaces.cc        (original)
+++ branches/bleeding_edge/src/spaces.cc        Fri Oct 17 02:13:27 2008
@@ -36,9 +36,9 @@
  // For contiguous spaces, top should be in the space (or at the end) and limit
  // should be the end of the space.
  #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space)->low() <= (info).top                 \
-         && (info).top <= (space)->high()             \
-         && (info).limit == (space)->high())
+  ASSERT((space).low() <= (info).top                 \
+         && (info).top <= (space).high()             \
+         && (info).limit == (space).high())


  // ----------------------------------------------------------------------------
@@ -760,16 +760,19 @@
  // -----------------------------------------------------------------------------
  // NewSpace implementation

-NewSpace::NewSpace(int initial_semispace_capacity,
-                   int maximum_semispace_capacity,
-                   AllocationSpace id)
-    : Space(id, NOT_EXECUTABLE) {
+
+bool NewSpace::Setup(Address start, int size) {
+  // Setup new space based on the preallocated memory block defined by
+  // start and size. The provided space is divided into two semi-spaces.
+  // To support fast containment testing in the new space, the size of
+  // this chunk must be a power of two and it must be aligned to its size.
+  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
+  int maximum_semispace_capacity = Heap::SemiSpaceSize();
+
    ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
    ASSERT(IsPowerOf2(maximum_semispace_capacity));
    maximum_capacity_ = maximum_semispace_capacity;
    capacity_ = initial_semispace_capacity;
-  to_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
-  from_space_ = new SemiSpace(capacity_, maximum_capacity_, id);

    // Allocate and setup the histogram arrays if necessary.
  #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -781,19 +784,16 @@
    INSTANCE_TYPE_LIST(SET_NAME)
  #undef SET_NAME
  #endif
-}
-

-bool NewSpace::Setup(Address start, int size) {
    ASSERT(size == 2 * maximum_capacity_);
    ASSERT(IsAddressAligned(start, size, 0));

-  if (to_space_ == NULL
-      || !to_space_->Setup(start, maximum_capacity_)) {
+  if (!to_space_.Setup(start, capacity_, maximum_capacity_)) {
      return false;
    }
-  if (from_space_ == NULL
-      || !from_space_->Setup(start + maximum_capacity_, maximum_capacity_)) {
+  if (!from_space_.Setup(start + maximum_capacity_,
+                         capacity_,
+                         maximum_capacity_)) {
      return false;
    }

@@ -802,8 +802,8 @@
    object_mask_ = address_mask_ | kHeapObjectTag;
    object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

-  allocation_info_.top = to_space_->low();
-  allocation_info_.limit = to_space_->high();
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
    mc_forwarding_info_.top = NULL;
    mc_forwarding_info_.limit = NULL;

@@ -831,22 +831,13 @@
    mc_forwarding_info_.top = NULL;
    mc_forwarding_info_.limit = NULL;

-  if (to_space_ != NULL) {
-    to_space_->TearDown();
-    delete to_space_;
-    to_space_ = NULL;
-  }
-
-  if (from_space_ != NULL) {
-    from_space_->TearDown();
-    delete from_space_;
-    from_space_ = NULL;
-  }
+  to_space_.TearDown();
+  from_space_.TearDown();
  }


  void NewSpace::Flip() {
-  SemiSpace* tmp = from_space_;
+  SemiSpace tmp = from_space_;
    from_space_ = to_space_;
    to_space_ = tmp;
  }
@@ -857,24 +848,24 @@
    // TODO(1240712): Failure to double the from space can result in
    // semispaces of different sizes.  In the event of that failure, the
    // to space doubling should be rolled back before returning false.
-  if (!to_space_->Double() || !from_space_->Double()) return false;
+  if (!to_space_.Double() || !from_space_.Double()) return false;
    capacity_ *= 2;
-  allocation_info_.limit = to_space_->high();
+  allocation_info_.limit = to_space_.high();
    ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    return true;
  }


  void NewSpace::ResetAllocationInfo() {
-  allocation_info_.top = to_space_->low();
-  allocation_info_.limit = to_space_->high();
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
    ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  }


  void NewSpace::MCResetRelocationInfo() {
-  mc_forwarding_info_.top = from_space_->low();
-  mc_forwarding_info_.limit = from_space_->high();
+  mc_forwarding_info_.top = from_space_.low();
+  mc_forwarding_info_.limit = from_space_.high();
    ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
  }

@@ -883,7 +874,7 @@
    // Assumes that the spaces have been flipped so that mc_forwarding_info_  
is
    // valid allocation info for the to space.
    allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = to_space_->high();
+  allocation_info_.limit = to_space_.high();
    ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  }

@@ -897,7 +888,7 @@

    // There should be objects packed in from the low address up to the
    // allocation pointer.
-  Address current = to_space_->low();
+  Address current = to_space_.low();
    while (current < top()) {
      HeapObject* object = HeapObject::FromAddress(current);

@@ -931,22 +922,24 @@
  // -----------------------------------------------------------------------------
  // SemiSpace implementation

-SemiSpace::SemiSpace(int initial_capacity,
-                     int maximum_capacity,
-                     AllocationSpace id)
-    : Space(id, NOT_EXECUTABLE), capacity_(initial_capacity),
-      maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
-}
-
+bool SemiSpace::Setup(Address start,
+                      int initial_capacity,
+                      int maximum_capacity) {
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
+  // memory of size 'capacity' when set up, and does not grow or shrink
+  // otherwise.  In the mark-compact collector, the memory region of the from
+  // space is used as the marking stack. It requires contiguous memory
+  // addresses.
+  capacity_ = initial_capacity;
+  maximum_capacity_ = maximum_capacity;

-bool SemiSpace::Setup(Address start, int size) {
-  ASSERT(size == maximum_capacity_);
    if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
      return false;
    }

    start_ = start;
-  address_mask_ = ~(size - 1);
+  address_mask_ = ~(maximum_capacity - 1);
    object_mask_ = address_mask_ | kHeapObjectTag;
    object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

@@ -1002,7 +995,7 @@
    ASSERT(space->ToSpaceContains(start));
    ASSERT(space->ToSpaceLow() <= end
           && end <= space->ToSpaceHigh());
-  space_ = space->to_space_;
+  space_ = &space->to_space_;
    current_ = start;
    limit_ = end;
    size_func_ = size_func;

Modified: branches/bleeding_edge/src/spaces.h
==============================================================================
--- branches/bleeding_edge/src/spaces.h (original)
+++ branches/bleeding_edge/src/spaces.h Fri Oct 17 02:13:27 2008
@@ -878,19 +878,14 @@

  class SemiSpace : public Space {
   public:
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
-  // memory of size 'capacity' when set up, and does not grow or shrink
-  // otherwise.  In the mark-compact collector, the memory region of the from
-  // space is used as the marking stack. It requires contiguous memory
-  // addresses.
-  SemiSpace(int initial_capacity,
-            int maximum_capacity,
-            AllocationSpace id);
-  virtual ~SemiSpace() {}
+  // Constructor.
+  SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
+    start_ = NULL;
+    age_mark_ = NULL;
+  }

    // Sets up the semispace using the given chunk.
-  bool Setup(Address start, int size);
+  bool Setup(Address start, int initial_capacity, int maximum_capacity);

    // Tear down the space.  Heap memory was not allocated by the space, so it
    // is not deallocated here.
@@ -1016,16 +1011,8 @@

  class NewSpace : public Space {
   public:
-  // Create a new space with a given allocation capacity (ie, the capacity of
-  // *one* of the semispaces).  The constructor does not allocate heap memory
-  // from the OS.  When the space is set up, it is given a contiguous chunk of
-  // memory of size 2 * semispace_capacity.  To support fast containment
-  // testing in the new space, the size of this chunk must be a power of two
-  // and it must be aligned to its size.
-  NewSpace(int initial_semispace_capacity,
-           int maximum_semispace_capacity,
-           AllocationSpace id);
-  virtual ~NewSpace() {}
+  // Constructor.
+  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

    // Sets up the new space using the given chunk.
    bool Setup(Address start, int size);
@@ -1036,7 +1023,7 @@

    // True if the space has been set up but not torn down.
    bool HasBeenSetup() {
-    return to_space_->HasBeenSetup() && from_space_->HasBeenSetup();
+    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
    }

    // Flip the pair of spaces.
@@ -1069,12 +1056,12 @@
    // Return the address of the allocation pointer in the active semispace.
    Address top() { return allocation_info_.top; }
    // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_->low(); }
+  Address bottom() { return to_space_.low(); }

    // Get the age mark of the inactive semispace.
-  Address age_mark() { return from_space_->age_mark(); }
+  Address age_mark() { return from_space_.age_mark(); }
    // Set the age mark in the active semispace.
-  void set_age_mark(Address mark) { to_space_->set_age_mark(mark); }
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

    // The start address of the space and a bit mask. Anding an address in the
    // new space with the mask will result in the start address.
@@ -1105,36 +1092,36 @@
    void MCCommitRelocationInfo();

    // Get the extent of the inactive semispace (for use as a marking stack).
-  Address FromSpaceLow() { return from_space_->low(); }
-  Address FromSpaceHigh() { return from_space_->high(); }
+  Address FromSpaceLow() { return from_space_.low(); }
+  Address FromSpaceHigh() { return from_space_.high(); }

    // Get the extent of the active semispace (to sweep newly copied objects
    // during a scavenge collection).
-  Address ToSpaceLow() { return to_space_->low(); }
-  Address ToSpaceHigh() { return to_space_->high(); }
+  Address ToSpaceLow() { return to_space_.low(); }
+  Address ToSpaceHigh() { return to_space_.high(); }

    // Offsets from the beginning of the semispaces.
    int ToSpaceOffsetForAddress(Address a) {
-    return to_space_->SpaceOffsetForAddress(a);
+    return to_space_.SpaceOffsetForAddress(a);
    }
    int FromSpaceOffsetForAddress(Address a) {
-    return from_space_->SpaceOffsetForAddress(a);
+    return from_space_.SpaceOffsetForAddress(a);
    }

    // True if the object is a heap object in the address range of the
    // respective semispace (not necessarily below the allocation pointer of the
    // semispace).
-  bool ToSpaceContains(Object* o) { return to_space_->Contains(o); }
-  bool FromSpaceContains(Object* o) { return from_space_->Contains(o); }
+  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

-  bool ToSpaceContains(Address a) { return to_space_->Contains(a); }
-  bool FromSpaceContains(Address a) { return from_space_->Contains(a); }
+  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
+  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

  #ifdef DEBUG
    // Verify the active semispace.
    virtual void Verify();
    // Print the active semispace.
-  virtual void Print() { to_space_->Print(); }
+  virtual void Print() { to_space_.Print(); }
  #endif

  #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -1158,8 +1145,8 @@
    int maximum_capacity_;

    // The semispaces.
-  SemiSpace* to_space_;
-  SemiSpace* from_space_;
+  SemiSpace to_space_;
+  SemiSpace from_space_;

    // Start address and bit mask for containment testing.
    Address start_;

Modified: branches/bleeding_edge/test/cctest/test-spaces.cc
==============================================================================
--- branches/bleeding_edge/test/cctest/test-spaces.cc   (original)
+++ branches/bleeding_edge/test/cctest/test-spaces.cc   Fri Oct 17 02:13:27 2008
@@ -157,27 +157,23 @@
    CHECK(Heap::ConfigureHeapDefault());
    CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));

-  NewSpace* s = new NewSpace(Heap::InitialSemiSpaceSize(),
-                             Heap::SemiSpaceSize(),
-                             NEW_SPACE);
-  CHECK(s != NULL);
+  NewSpace new_space;

    void* chunk =
      MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
    CHECK(chunk != NULL);
    Address start = RoundUp(static_cast<Address>(chunk),
                            Heap::YoungGenerationSize());
-  CHECK(s->Setup(start, Heap::YoungGenerationSize()));
-  CHECK(s->HasBeenSetup());
+  CHECK(new_space.Setup(start, Heap::YoungGenerationSize()));
+  CHECK(new_space.HasBeenSetup());

-  while (s->Available() >= Page::kMaxHeapObjectSize) {
-    Object* obj = s->AllocateRaw(Page::kMaxHeapObjectSize);
+  while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+    Object* obj = new_space.AllocateRaw(Page::kMaxHeapObjectSize);
      CHECK(!obj->IsFailure());
-    CHECK(s->Contains(HeapObject::cast(obj)));
+    CHECK(new_space.Contains(HeapObject::cast(obj)));
    }

-  s->TearDown();
-  delete s;
+  new_space.TearDown();
    MemoryAllocator::TearDown();
  }


--~--~---------~--~----~------------~-------~--~----~
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
-~----------~----~----~----~------~----~------~--~---

Reply via email to