Revision: 12483
Author:   [email protected]
Date:     Tue Sep 11 07:01:39 2012
Log:      Fix invariant so that we cannot record relocation slots for
white objects when compacting.  Add flag for incremental code
compaction.
Review URL: https://chromiumcodereview.appspot.com/10907174
http://code.google.com/p/v8/source/detail?r=12483

Modified:
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/incremental-marking-inl.h
 /branches/bleeding_edge/src/incremental-marking.cc
 /branches/bleeding_edge/src/incremental-marking.h
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h

=======================================
--- /branches/bleeding_edge/src/flag-definitions.h      Fri Sep  7 02:01:54 2012
+++ /branches/bleeding_edge/src/flag-definitions.h      Tue Sep 11 07:01:39 2012
@@ -412,6 +412,8 @@
             "Never perform compaction on full GC - testing only")
 DEFINE_bool(compact_code_space, true,
             "Compact code space on full non-incremental collections")
+DEFINE_bool(incremental_code_compaction, false,
+            "Compact code space on full incremental collections")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
=======================================
--- /branches/bleeding_edge/src/incremental-marking-inl.h Tue Jul 10 05:52:36 2012
+++ /branches/bleeding_edge/src/incremental-marking-inl.h Tue Sep 11 07:01:39 2012
@@ -48,7 +48,9 @@
     // Object is either grey or white.  It will be scanned if survives.
     return false;
   }
-  return true;
+  if (!is_compacting_) return false;
+  MarkBit obj_bit = Marking::MarkBitFrom(obj);
+  return Marking::IsBlack(obj_bit);
 }


=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Fri Aug 17 02:03:08 2012
+++ /branches/bleeding_edge/src/incremental-marking.cc Tue Sep 11 07:01:39 2012
@@ -67,7 +67,7 @@
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                          Object** slot,
                                          Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
     MarkBit obj_bit = Marking::MarkBitFrom(obj);
     if (Marking::IsBlack(obj_bit)) {
       // Object is not going to be rescanned we need to record the slot.
@@ -127,9 +127,9 @@


 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
-                                                Object** slot,
-                                                Code* value) {
-  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value)) {
     ASSERT(slot != NULL);
     heap_->mark_compact_collector()->
         RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
=======================================
--- /branches/bleeding_edge/src/incremental-marking.h Wed Jul 25 08:23:07 2012
+++ /branches/bleeding_edge/src/incremental-marking.h Tue Sep 11 07:01:39 2012
@@ -132,6 +132,12 @@
                                                Object** slot,
                                                Isolate* isolate);

+  // Record a slot for compaction.  Returns false for objects that are
+  // guaranteed to be rescanned or not guaranteed to survive.
+  //
+  // No slots in white objects should be recorded, as some slots are typed and
+  // cannot be interpreted correctly if the underlying object does not survive
+  // the incremental cycle (stays white).
   INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWriteIntoCode(HeapObject* obj,
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Mon Aug 20 04:35:50 2012
+++ /branches/bleeding_edge/src/mark-compact.cc Tue Sep 11 07:01:39 2012
@@ -343,7 +343,9 @@
     CollectEvacuationCandidates(heap()->old_pointer_space());
     CollectEvacuationCandidates(heap()->old_data_space());

-    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
+    if (FLAG_compact_code_space &&
+        (mode == NON_INCREMENTAL_COMPACTION ||
+         FLAG_incremental_code_compaction)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -1443,7 +1445,7 @@
     } else {
       // Don't visit code object.

-      // Visit shared function info to avoid double checking of it's
+      // Visit shared function info to avoid double checking of its
       // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
       MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
@@ -1704,7 +1706,7 @@

 void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
   // For optimized functions we should retain both non-optimized version
-  // of it's code and non-optimized version of all inlined functions.
+  // of its code and non-optimized version of all inlined functions.
   // This is required to support bailing out from inlined code.
   DeoptimizationInputData* data =
       DeoptimizationInputData::cast(code->deoptimization_data());
@@ -2300,7 +2302,7 @@
     // non-incremental marker can deal with them as if overflow
     // occured during normal marking.
     // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy it's overflow state.
+    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
     incremental_marking_overflowed =
         incremental_marking->marking_deque()->overflowed();
=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Mon Aug 13 01:43:16 2012
+++ /branches/bleeding_edge/src/mark-compact.h  Tue Sep 11 07:01:39 2012
@@ -304,6 +304,26 @@
     NUMBER_OF_SLOT_TYPES
   };

+  static const char* SlotTypeToString(SlotType type) {
+    switch (type) {
+      case EMBEDDED_OBJECT_SLOT:
+        return "EMBEDDED_OBJECT_SLOT";
+      case RELOCATED_CODE_OBJECT:
+        return "RELOCATED_CODE_OBJECT";
+      case CODE_TARGET_SLOT:
+        return "CODE_TARGET_SLOT";
+      case CODE_ENTRY_SLOT:
+        return "CODE_ENTRY_SLOT";
+      case DEBUG_TARGET_SLOT:
+        return "DEBUG_TARGET_SLOT";
+      case JS_RETURN_SLOT:
+        return "JS_RETURN_SLOT";
+      case NUMBER_OF_SLOT_TYPES:
+        return "NUMBER_OF_SLOT_TYPES";
+    }
+    return "UNKNOWN SlotType";
+  }
+
   void UpdateSlots(Heap* heap);

   void UpdateSlotsWithFilter(Heap* heap);

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to