Reviewers: Kevin Millikin,

Description:
This fixes the issue with the deoptimizer trashing the reloc info before
patching the code.

If we, immediately after the deoptimization, but before actually
running the patched code, get a compacting GC, the addresses from the
calls might no longer be valid.

I have validated that this works by patching the existing code to
always do a compacting gc after we finish deoptimizing. I will create
a real regression test for this, but this includes additional code for
allowing us to force a deopt/opt from javascript test code. I will
land this in a separate change.



Please review this at http://codereview.chromium.org/6349043/

SVN Base: http://v8.googlecode.com/svn/branches/bleeding_edge/

Affected files:
  M     src/ia32/deoptimizer-ia32.cc


Index: src/ia32/deoptimizer-ia32.cc
===================================================================
--- src/ia32/deoptimizer-ia32.cc        (revision 6550)
+++ src/ia32/deoptimizer-ia32.cc        (working copy)
@@ -33,6 +33,7 @@
 #include "deoptimizer.h"
 #include "full-codegen.h"
 #include "safepoint-table.h"
+#include "utils.h"

 namespace v8 {
 namespace internal {
@@ -48,14 +49,16 @@
   // Get the optimized code.
   Code* code = function->code();

- // Invalidate the relocation information, as it will become invalid by the
-  // code patching below, and is not needed any more.
-  code->InvalidateRelocation();
-
   // For each return after a safepoint insert a absolute call to the
   // corresponding deoptimization entry.
   unsigned last_pc_offset = 0;
   SafepointTable table(function->code());
+
+  Address original_reloc_payload = code->relocation_start();
+  Address reloc_end =
+      RoundUp(original_reloc_payload + code->relocation_size(), kPointerSize);
+  RelocInfoWriter reloc_info_writer(reloc_end, code->instruction_start());
+
   for (unsigned i = 0; i < table.length(); i++) {
     unsigned pc_offset = table.GetPcOffset(i);
     SafepointEntry safepoint_entry = table.GetEntry(i);
@@ -72,12 +75,15 @@
 #endif
     last_pc_offset = pc_offset;
     if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      CodePatcher patcher(
-          code->instruction_start() + pc_offset + gap_code_size,
-          Assembler::kCallInstructionLength);
- patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
-                           RelocInfo::NONE);
+ Address call_pc = code->instruction_start() + pc_offset + gap_code_size;
+      CodePatcher patcher(call_pc, Assembler::kCallInstructionLength);
+      Address entry_address =
+          GetDeoptimizationEntry(deoptimization_index, LAZY);
+      patcher.masm()->call(entry_address, RelocInfo::NONE);
       last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+      RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
+                      reinterpret_cast<intptr_t>(entry_address));
+      reloc_info_writer.Write(&rinfo);
     }
   }
 #ifdef DEBUG
@@ -90,6 +96,40 @@
   }
 #endif

+  int reloc_size = reloc_end - reloc_info_writer.pos();
+  memmove(original_reloc_payload, reloc_info_writer.pos(), reloc_size);
+
+  // The relocation info is in place, update the size.
+  code->relocation_info()->set_length(reloc_size);
+
+  Address new_reloc_end =
+      RoundUp(code->relocation_start() + reloc_size, kPointerSize);
+  CHECK(new_reloc_end <= reloc_end);
+
+  // Handle the junk part after the new relocation info.
+  if (reloc_end - new_reloc_end <= ByteArray::kHeaderSize) {
+    // We get in here if there is not enough space for a ByteArray.
+
+    // Both addresses are kPointerSize aligned.
+    CHECK((reloc_end - new_reloc_end) % 4 == 0);
+    while(reloc_end > new_reloc_end) {
+      Address filler = reloc_end - kPointerSize;
+      Memory::Object_at(filler) = Heap::one_pointer_filler_map();
+      reloc_end -= kPointerSize;
+    }
+  } else {
+    Address junk_data_start = new_reloc_end + ByteArray::kHeaderSize;
+    int junk_size = reloc_end - junk_data_start;
+
+    // Since the reloc_end address and junk_data_start are both aligned,
+    // we should never have junk which is not a multiple of kPointerSize.
+    CHECK(junk_size % kPointerSize == 0);
+    CHECK(junk_size > 0);
+ ByteArray* junk_array = ByteArray::FromDataStartAddress(junk_data_start);
+    junk_array->set_map(Heap::byte_array_map());
+    junk_array->set_length(junk_size);
+  }
+
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   node->set_next(deoptimizing_code_list_);


--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to