Revision: 6444
Author: [email protected]
Date: Mon Jan 24 06:54:45 2011
Log: Move stack check patching to the architecture-dependent deoptimizer
files. On ARM it is not enough to iterate the relocation information,
because that only gives us access to the constant pool entry and not to
the place in the instruction stream where the target from the constant
pool is called.
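
For illustration, a sketch of the ARM call sequence in question (assumed
shape, not code from this patch): the relocation entry marks the constant
pool slot, while the instruction that performs the call sits elsewhere in
the instruction stream:

     ldr  ip, [pc, #<offset>]   @ load call target from the constant pool
     blx  ip                    @ call through ip
     ...
     <constant pool>
     .word <stack-check stub>   @ <- relocation info points at this slot

Patching therefore has to locate the ldr/blx site itself, which is why the
logic moves into the per-architecture deoptimizer files.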

Review URL: http://codereview.chromium.org/6343005
http://code.google.com/p/v8/source/detail?r=6444

Modified:
 /branches/bleeding_edge/src/arm/deoptimizer-arm.cc
 /branches/bleeding_edge/src/deoptimizer.h
 /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
 /branches/bleeding_edge/src/runtime-profiler.cc
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/src/x64/deoptimizer-x64.cc

=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Wed Jan 12 06:14:14 2011
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Mon Jan 24 06:54:45 2011
@@ -112,13 +112,16 @@
 }


-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+                                      Code* check_code,
                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }


-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+                                       Code* check_code,
+                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }
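
Both ARM stubs remain UNIMPLEMENTED in this change. Purely as an
illustrative sketch (IsStackCheckCallSite and PatchCallSite are
hypothetical helpers, not part of this patch), an ARM implementation
would walk the instruction stream rather than the relocation information:

  // Illustrative sketch only; IsStackCheckCallSite and PatchCallSite are
  // hypothetical helpers.  Relocation entries mark only the constant pool
  // slots, so each call site must be found in the instruction stream.
  for (Address pc = unoptimized_code->instruction_start();
       pc < unoptimized_code->instruction_end();
       pc += Assembler::kInstrSize) {
    if (IsStackCheckCallSite(pc, check_code)) {
      PatchCallSite(pc, replacement_code->entry());
    }
  }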

=======================================
--- /branches/bleeding_edge/src/deoptimizer.h   Wed Dec 22 01:49:26 2010
+++ /branches/bleeding_edge/src/deoptimizer.h   Mon Jan 24 06:54:45 2011
@@ -128,14 +128,17 @@

   static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);

-  // Given the relocation info of a call to the stack check stub, patch the
-  // code so as to go unconditionally to the on-stack replacement builtin
-  // instead.
-  static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
-
-  // Given the relocation info of a call to the on-stack replacement
-  // builtin, patch the code back to the original stack check code.
-  static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+  // Patch all stack guard checks in the unoptimized code to
+  // unconditionally call replacement_code.
+  static void PatchStackCheckCode(Code* unoptimized_code,
+                                  Code* check_code,
+                                  Code* replacement_code);
+
+  // Change all patched stack guard checks in the unoptimized code
+  // back to a normal stack guard check.
+  static void RevertStackCheckCode(Code* unoptimized_code,
+                                   Code* check_code,
+                                   Code* replacement_code);

   ~Deoptimizer();

=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Thu Jan 13 06:10:26 2011
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Mon Jan 24 06:54:45 2011
@@ -106,44 +106,71 @@
 }


-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+                                      Code* check_code,
                                       Code* replacement_code) {
-  // The stack check code matches the pattern:
-  //
-  //     cmp esp, <limit>
-  //     jae ok
-  //     call <stack guard>
-  //     test eax, <loop nesting depth>
-  // ok: ...
-  //
-  // We will patch away the branch so the code is:
-  //
-  //     cmp esp, <limit>  ;; Not changed
-  //     nop
-  //     nop
-  //     call <on-stack replacement>
-  //     test eax, <loop nesting depth>
-  // ok:
-  Address call_target_address = rinfo->pc();
-  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
-         *(call_target_address - 2) == 0x07 &&  // offset
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
-  rinfo->set_target_address(replacement_code->entry());
+  // Iterate the unoptimized code and patch every stack check except at
+  // the function entry.  This code assumes the function entry stack
+  // check appears first, i.e., is not deferred or otherwise reordered.
+  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  bool first = true;
+  for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == Code::cast(check_code)->entry()) {
+      if (first) {
+        first = false;
+      } else {
+        // The stack check code matches the pattern:
+        //
+        //     cmp esp, <limit>
+        //     jae ok
+        //     call <stack guard>
+        //     test eax, <loop nesting depth>
+        // ok: ...
+        //
+        // We will patch away the branch so the code is:
+        //
+        //     cmp esp, <limit>  ;; Not changed
+        //     nop
+        //     nop
+        //     call <on-stack replacement>
+        //     test eax, <loop nesting depth>
+        // ok:
+        Address call_target_address = rinfo->pc();
+        ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+               *(call_target_address - 2) == 0x07 &&  // offset
+               *(call_target_address - 1) == 0xe8);   // call
+        *(call_target_address - 3) = 0x90;  // nop
+        *(call_target_address - 2) = 0x90;  // nop
+        rinfo->set_target_address(replacement_code->entry());
+      }
+    }
+  }
 }


-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
-  // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
-  // restore the conditional branch.
-  Address call_target_address = rinfo->pc();
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x73;  // jae
-  *(call_target_address - 2) = 0x07;  // offset
-  rinfo->set_target_address(check_code->entry());
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+                                       Code* check_code,
+                                       Code* replacement_code) {
+  // Iterate the unoptimized code and revert all the patched stack checks.
+  for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == replacement_code->entry()) {
+      // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+      // restore the conditional branch.
+      Address call_target_address = rinfo->pc();
+      ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+             *(call_target_address - 2) == 0x90 &&  // nop
+             *(call_target_address - 1) == 0xe8);   // call
+      *(call_target_address - 3) = 0x73;  // jae
+      *(call_target_address - 2) = 0x07;  // offset
+      rinfo->set_target_address(check_code->entry());
+    }
+  }
 }


=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc     Tue Jan 18 05:59:49 2011
+++ /branches/bleeding_edge/src/runtime-profiler.cc     Mon Jan 24 06:54:45 2011
@@ -193,22 +193,9 @@
   if (maybe_check_code->ToObject(&check_code)) {
    Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
     Code* unoptimized_code = shared->code();
-    // Iterate the unoptimized code and patch every stack check except at
-    // the function entry.  This code assumes the function entry stack
-    // check appears first, i.e., is not deferred or otherwise reordered.
-    bool first = true;
-    for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
-         !it.done();
-         it.next()) {
-      RelocInfo* rinfo = it.rinfo();
-      if (rinfo->target_address() == Code::cast(check_code)->entry()) {
-        if (first) {
-          first = false;
-        } else {
-          Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
-        }
-      }
-    }
+    Deoptimizer::PatchStackCheckCode(unoptimized_code,
+                                     Code::cast(check_code),
+                                     replacement_code);
   }
 }

=======================================
--- /branches/bleeding_edge/src/runtime.cc      Mon Jan 24 06:03:30 2011
+++ /branches/bleeding_edge/src/runtime.cc      Mon Jan 24 06:54:45 2011
@@ -6944,15 +6944,9 @@
   Handle<Code> check_code = check_stub.GetCode();
   Handle<Code> replacement_code(
       Builtins::builtin(Builtins::OnStackReplacement));
-  // Iterate the unoptimized code and revert all the patched stack checks.
-  for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
-       !it.done();
-       it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    if (rinfo->target_address() == replacement_code->entry()) {
-      Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
-    }
-  }
+  Deoptimizer::RevertStackCheckCode(*unoptimized,
+                                    *check_code,
+                                    *replacement_code);

   // Allow OSR only at nesting level zero again.
   unoptimized->set_allow_osr_at_loop_nesting_level(0);
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Mon Jan 17 05:11:39 2011
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Mon Jan 24 06:54:45 2011
@@ -46,13 +46,16 @@
 }


-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+                                      Code* check_code,
                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }


-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+                                       Code* check_code,
+                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev