Revision: 22858
Author:   [email protected]
Date:     Tue Aug  5 11:16:11 2014 UTC
Log:      Move left and right trimming of FixedArray into Heap.

[email protected]
BUG=v8:3231
LOG=n

Review URL: https://codereview.chromium.org/200443004
http://code.google.com/p/v8/source/detail?r=22858

Modified:
 /branches/bleeding_edge/src/builtins.cc
 /branches/bleeding_edge/src/elements.cc
 /branches/bleeding_edge/src/heap/heap.cc
 /branches/bleeding_edge/src/heap/heap.h
 /branches/bleeding_edge/src/objects.cc
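
As a quick orientation, a condensed sketch of the new Heap-level API that the
call sites below migrate to (illustrative only; handles, checks and variable
names are abbreviated from the hunks in this patch):

  // Left trimming relocates the object start, so the caller must hold the
  // only reference and re-install the returned array (see builtins.cc):
  if (heap->CanMoveObjectStart(*elms_obj)) {
    array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
  }

  // Right trimming keeps the object start and shrinks the length in place;
  // the InvocationMode template argument selects mutator vs. GC accounting
  // (see elements.cc and objects.cc):
  heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*backing_store, unused_tail);

Here unused_tail is a placeholder for whatever element count a particular call
site trims (e.g. old_capacity - length in elements.cc).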

=======================================
--- /branches/bleeding_edge/src/builtins.cc     Tue Aug  5 08:18:22 2014 UTC
+++ /branches/bleeding_edge/src/builtins.cc     Tue Aug  5 11:16:11 2014 UTC
@@ -180,70 +180,6 @@
   MemMove(dst->data_start() + dst_index, src->data_start() + src_index,
           len * kDoubleSize);
 }
-
-
-static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
-                                          FixedArrayBase* elms,
-                                          int to_trim) {
-  DCHECK(heap->CanMoveObjectStart(elms));
-
-  Map* map = elms->map();
-  int entry_size;
-  if (elms->IsFixedArray()) {
-    entry_size = kPointerSize;
-  } else {
-    entry_size = kDoubleSize;
-  }
-  DCHECK(elms->map() != heap->fixed_cow_array_map());
-  // For now this trick is only applied to fixed arrays in new and paged space.
-  // In large object space the object's start must coincide with chunk
-  // and thus the trick is just not applicable.
-  DCHECK(!heap->lo_space()->Contains(elms));
-
-  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
-  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
-  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
-
-  Object** former_start = HeapObject::RawField(elms, 0);
-
-  const int len = elms->length();
-
-  if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
-      elms->IsFixedArray() &&
-      !heap->new_space()->Contains(elms)) {
-    // If we are doing a big trim in old space then we zap the space that was
-    // formerly part of the array so that the GC (aided by the card-based
-    // remembered set) won't find pointers to new-space there.
-    Object** zap = reinterpret_cast<Object**>(elms->address());
-    zap++;  // Header of filler must be at least one word so skip that.
-    for (int i = 1; i < to_trim; i++) {
-      *zap++ = Smi::FromInt(0);
-    }
-  }
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  // Since left trimming is only performed on pages which are not concurrently
-  // swept creating a filler object does not require synchronization.
-  heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
-
-  int new_start_index = to_trim * (entry_size / kPointerSize);
-  former_start[new_start_index] = map;
-  former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
-
-  // Maintain marking consistency for HeapObjectIterator and
-  // IncrementalMarking.
-  int size_delta = to_trim * entry_size;
-  Address new_start = elms->address() + size_delta;
-  heap->marking()->TransferMark(elms->address(), new_start);
-  heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
-
-  FixedArrayBase* new_elms =
-      FixedArrayBase::cast(HeapObject::FromAddress(new_start));
-
-  heap->OnMoveEvent(new_elms, elms, new_elms->Size());
-  return new_elms;
-}


 static bool ArrayPrototypeHasNoElements(Heap* heap,
@@ -536,7 +472,7 @@
   }

   if (heap->CanMoveObjectStart(*elms_obj)) {
-    array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
+    array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
   } else {
     // Shift the elements.
     if (elms_obj->IsFixedArray()) {
@@ -881,7 +817,7 @@

       if (heap->CanMoveObjectStart(*elms_obj)) {
         // On the fast path we move the start of the object in memory.
-        elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate);
+        elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta));
       } else {
         // This is the slow path. We are going to move the elements to the left
         // by copying them. For trimmed values we store the hole.
=======================================
--- /branches/bleeding_edge/src/elements.cc     Mon Aug  4 11:34:54 2014 UTC
+++ /branches/bleeding_edge/src/elements.cc     Tue Aug  5 11:16:11 2014 UTC
@@ -873,8 +873,7 @@

 // Super class for all fast element arrays.
 template<typename FastElementsAccessorSubclass,
-         typename KindTraits,
-         int ElementSize>
+         typename KindTraits>
 class FastElementsAccessor
     : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
  public:
@@ -916,15 +915,8 @@
         if (length == 0) {
           array->initialize_elements();
         } else {
-          int filler_size = (old_capacity - length) * ElementSize;
-          Address filler_start = backing_store->address() +
-              BackingStore::OffsetOfElementAt(length);
-          array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
-
-          // We are storing the new length using release store after creating a
-          // filler for the left-over space to avoid races with the sweeper
-          // thread.
-          backing_store->synchronized_set_length(length);
+          isolate->heap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(
+              *backing_store, old_capacity - length);
         }
       } else {
         // Otherwise, fill the unused tail with holes.
@@ -1078,14 +1070,11 @@
 template<typename FastElementsAccessorSubclass,
          typename KindTraits>
 class FastSmiOrObjectElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass,
-                                  KindTraits,
-                                  kPointerSize> {
+    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
  public:
   explicit FastSmiOrObjectElementsAccessor(const char* name)
       : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits,
-                             kPointerSize>(name) {}
+                             KindTraits>(name) {}

   static void CopyElementsImpl(Handle<FixedArrayBase> from,
                                uint32_t from_start,
@@ -1199,14 +1188,11 @@
 template<typename FastElementsAccessorSubclass,
          typename KindTraits>
 class FastDoubleElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass,
-                                  KindTraits,
-                                  kDoubleSize> {
+    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
  public:
   explicit FastDoubleElementsAccessor(const char* name)
       : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits,
-                             kDoubleSize>(name) {}
+                             KindTraits>(name) {}

   static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
                                                uint32_t capacity,
=======================================
--- /branches/bleeding_edge/src/heap/heap.cc    Tue Aug  5 09:40:09 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.cc    Tue Aug  5 11:16:11 2014 UTC
@@ -3258,6 +3258,125 @@
     }
   }
 }
+
+
+static void ZapFixedArrayForTrimming(Address address, int elements_to_trim) {
+  Object** zap = reinterpret_cast<Object**>(address);
+  zap++;  // Header of filler must be at least one word so skip that.
+  for (int i = 1; i < elements_to_trim; i++) {
+    *zap++ = Smi::FromInt(0);
+  }
+}
+
+
+FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
+                                         int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+  Map* map = object->map();
+
+  // For now this trick is only applied to objects in new and paged space.
+  // In large object space the object's start must coincide with chunk
+  // and thus the trick is just not applicable.
+  DCHECK(!lo_space()->Contains(object));
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+
+  const int len = object->length();
+  DCHECK(elements_to_trim <= len);
+
+  // Calculate location of new array start.
+  Address new_start = object->address() + bytes_to_trim;
+
+  if (bytes_to_trim > FreeSpace::kHeaderSize &&
+      object->IsFixedArray() &&
+      !new_space()->Contains(object)) {
+    // If we are doing a big trim in old space then we zap the space that was
+    // formerly part of the array so that the GC (aided by the card-based
+    // remembered set) won't find pointers to new-space there.
+    ZapFixedArrayForTrimming(object->address(), elements_to_trim);
+  }
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play safer
+  // we still do it.
+  CreateFillerObjectAt(object->address(), bytes_to_trim);
+
+  // Initialize header of the trimmed array. Since left trimming is only
+  // performed on pages which are not concurrently swept creating a filler
+  // object does not require synchronization.
+  DCHECK(CanMoveObjectStart(object));
+  Object** former_start = HeapObject::RawField(object, 0);
+  int new_start_index = elements_to_trim * (element_size / kPointerSize);
+  former_start[new_start_index] = map;
+  former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+  FixedArrayBase* new_object =
+      FixedArrayBase::cast(HeapObject::FromAddress(new_start));
+
+  // Maintain consistency of live bytes during incremental marking
+  marking()->TransferMark(object->address(), new_start);
+  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
+
+  // Notify the heap profiler of change in object layout.
+  OnMoveEvent(new_object, object, new_object->Size());
+  return new_object;
+}
+
+
+// Force instantiation of templatized method.
+template
+void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
+template
+void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
+
+
+template<Heap::InvocationMode mode>
+void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+
+  // For now this trick is only applied to objects in new and paged space.
+  DCHECK(!lo_space()->Contains(object));
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  const int len = object->length();
+  DCHECK(elements_to_trim < len);
+
+  // Calculate location of new array end.
+  Address new_end = object->address() + object->Size() - bytes_to_trim;
+
+  if (bytes_to_trim > FreeSpace::kHeaderSize &&
+      object->IsFixedArray() &&
+      (mode != Heap::FROM_GC || Heap::ShouldZapGarbage())) {
+    // If we are doing a big trim in old space then we zap the space that was
+    // formerly part of the array so that the GC (aided by the card-based
+    // remembered set) won't find pointers to new-space there.
+    ZapFixedArrayForTrimming(new_end, elements_to_trim);
+  }
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play safer
+  // we still do it.
+  CreateFillerObjectAt(new_end, bytes_to_trim);
+
+  // Initialize header of the trimmed array. We are storing the new length
+  // using release store after creating a filler for the left-over space to
+  // avoid races with the sweeper thread.
+  object->synchronized_set_length(len - elements_to_trim);
+
+  // Maintain consistency of live bytes during incremental marking
+  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
+
+  // Notify the heap profiler of change in object layout. The array may not be
+  // moved during GC, and size has to be adjusted nevertheless.
+  HeapProfiler* profiler = isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->UpdateObjectSizeEvent(object->address(), object->Size());
+  }
+}


 AllocationResult Heap::AllocateExternalArray(int length,
=======================================
--- /branches/bleeding_edge/src/heap/heap.h     Tue Aug  5 08:18:22 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.h     Tue Aug  5 11:16:11 2014 UTC
@@ -700,16 +700,26 @@
   inline void FinalizeExternalString(String* string);

   // Initialize a filler object to keep the ability to iterate over the heap
-  // when shortening objects.
+  // when introducing gaps within pages.
   void CreateFillerObjectAt(Address addr, int size);

   bool CanMoveObjectStart(HeapObject* object);

+  // Indicates whether live bytes adjustment is triggered from within the GC
+  // code or from mutator code.
   enum InvocationMode { FROM_GC, FROM_MUTATOR };

-  // Maintain marking consistency for IncrementalMarking.
+  // Maintain consistency of live bytes during incremental marking.
   void AdjustLiveBytes(Address address, int by, InvocationMode mode);

+  // Trim the given array from the left. Note that this relocates the object
+  // start and hence is only valid if there is only a single reference to it.
+  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
+  // Trim the given array from the right.
+  template<Heap::InvocationMode mode>
+  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
   // Converts the given boolean condition to JavaScript boolean value.
   inline Object* ToBoolean(bool condition);

=======================================
--- /branches/bleeding_edge/src/objects.cc      Tue Aug  5 09:24:27 2014 UTC
+++ /branches/bleeding_edge/src/objects.cc      Tue Aug  5 11:16:11 2014 UTC
@@ -1989,54 +1989,6 @@
       return NULL;
   }
 }
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
-  // If we are doing a big trim in old space then we zap the space.
-  Object** zap = reinterpret_cast<Object**>(new_end);
-  zap++;  // Header of filler must be at least one word so skip that.
-  for (int i = 1; i < to_trim; i++) {
-    *zap++ = Smi::FromInt(0);
-  }
-}
-
-
-template<Heap::InvocationMode mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
-  DCHECK(elms->map() != heap->fixed_cow_array_map());
-  // For now this trick is only applied to fixed arrays in new and paged space.
-  DCHECK(!heap->lo_space()->Contains(elms));
-
-  const int len = elms->length();
-
-  DCHECK(to_trim < len);
-
-  Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
-  if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) {
-    ZapEndOfFixedArray(new_end, to_trim);
-  }
-
-  int size_delta = to_trim * kPointerSize;
-
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  heap->CreateFillerObjectAt(new_end, size_delta);
-
-  // We are storing the new length using release store after creating a filler
-  // for the left-over space to avoid races with the sweeper thread.
-  elms->synchronized_set_length(len - to_trim);
-
-  heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
-
-  // The array may not be moved during GC,
-  // and size has to be adjusted nevertheless.
-  HeapProfiler* profiler = heap->isolate()->heap_profiler();
-  if (profiler->is_tracking_allocations()) {
-    profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
-  }
-}


 bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
@@ -2246,7 +2198,7 @@
   // If there are properties in the new backing store, trim it to the correct
   // size and install the backing store into the object.
   if (external > 0) {
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(heap, *array, inobject);
+    heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, inobject);
     object->set_properties(*array);
   }

@@ -8225,8 +8177,8 @@
 void FixedArray::Shrink(int new_length) {
   DCHECK(0 <= new_length && new_length <= length());
   if (new_length < length()) {
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(
-        GetHeap(), this, length() - new_length);
+    GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(
+        this, length() - new_length);
   }
 }

@@ -9592,12 +9544,12 @@

   int to_trim = enum_cache->length() - live_enum;
   if (to_trim <= 0) return;
-  RightTrimFixedArray<Heap::FROM_GC>(
-      heap, descriptors->GetEnumCache(), to_trim);
+  heap->RightTrimFixedArray<Heap::FROM_GC>(
+      descriptors->GetEnumCache(), to_trim);

   if (!descriptors->HasEnumIndicesCache()) return;
   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
-  RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim);
+  heap->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
 }


@@ -9609,8 +9561,8 @@
   int to_trim = number_of_descriptors - number_of_own_descriptors;
   if (to_trim == 0) return;

-  RightTrimFixedArray<Heap::FROM_GC>(
-      heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
+  heap->RightTrimFixedArray<Heap::FROM_GC>(
+      descriptors, to_trim * DescriptorArray::kDescriptorSize);
   descriptors->SetNumberOfDescriptors(number_of_own_descriptors);

   if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
@@ -9687,7 +9639,7 @@
   // transition array disappeared during GC.
   int trim = t->number_of_transitions() - transition_index;
   if (trim > 0) {
-    RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition()
+    heap->RightTrimFixedArray<Heap::FROM_GC>(t, t->IsSimpleTransition()
         ? trim : trim * TransitionArray::kTransitionSize);
   }
   DCHECK(HasTransitionArray());
@@ -9963,7 +9915,7 @@
   }
   if (dst != length) {
     // Always trim even when array is cleared because of heap verifier.
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst);
+    GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(code_map, length - dst);
     if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
   }
 }
@@ -9974,7 +9926,7 @@
   DCHECK(shrink_by % kEntryLength == 0);
   DCHECK(shrink_by <= code_map->length() - kEntriesStart);
   // Always trim even when array is cleared because of heap verifier.
-  RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by);
+  GetHeap()->RightTrimFixedArray<Heap::FROM_GC>(code_map, shrink_by);
   if (code_map->length() == kEntriesStart) {
     ClearOptimizedCodeMap();
   }
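
Usage note (a sketch drawn from the call sites above, not additional patch
content): the InvocationMode parameter only affects live-byte accounting and
whether the freed tail is zapped, so the two call patterns look like

  // From mutator code, e.g. FixedArray::Shrink: trims larger than the filler
  // header are zapped so the GC cannot find stale new-space pointers there.
  GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(this, length() - new_length);

  // From GC code, e.g. shrinking a DescriptorArray: the freed tail is zapped
  // only when Heap::ShouldZapGarbage() is enabled.
  heap->RightTrimFixedArray<Heap::FROM_GC>(
      descriptors, to_trim * DescriptorArray::kDescriptorSize);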
