Reviewers: Jakob,

Message:
Hi Jakob, could you take a look at this cleanup CL? It is in preparation for
the stackwalk bugfix we discussed.
Thanks,
--Michael


Description:
BinaryOpStub::GenerateSmiStub() on 32-bit would erroneously patch the IC when a
GC was required. Brought the behavior into line with ARM and x64, and did some
cleanup of label names.

BUG=

Please review this at https://codereview.chromium.org/15102006/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files:
  M src/ia32/code-stubs-ia32.cc
  M src/x64/code-stubs-x64.cc


Index: src/ia32/code-stubs-ia32.cc
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 2897234977d0ace3dd3259635392277e7c9d02fb..139d1a4a7904cde41b80abf39a51bdbe230e546e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1230,6 +1230,7 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
 static void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
     Label* slow,
+    Label* gc_required,
BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
     Token::Value op) {
   // 1. Move arguments into edx, eax except for DIV and MOD, which need the
@@ -1489,7 +1490,7 @@ static void BinaryOpStub_GenerateSmiCode(
         __ bind(&use_fp_on_smis);
// Result we want is in left == edx, so we can put the allocated heap
         // number in eax.
-        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        __ AllocateHeapNumber(eax, ecx, ebx, gc_required);
         // Store the result in the HeapNumber and return.
         // It's OK to overwrite the arguments on the stack because we
         // are about to return.
@@ -1543,7 +1544,7 @@ static void BinaryOpStub_GenerateSmiCode(
           default: UNREACHABLE();
             break;
         }
-        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        __ AllocateHeapNumber(ecx, ebx, no_reg, gc_required);
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
@@ -1628,12 +1629,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
       result_type_ == BinaryOpIC::SMI) {
     BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
+        masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS, op_);
   } else {
     BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+        masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
   }
-  __ bind(&call_runtime);
+
+ // Code falls through if the result is not returned as either a smi or heap
+  // number.
   switch (op_) {
     case Token::ADD:
     case Token::SUB:
@@ -1653,6 +1656,27 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
     default:
       UNREACHABLE();
   }
+
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      break;
+    default:
+      UNREACHABLE();
+  }
+  GenerateCallRuntime(masm);
 }


@@ -2128,7 +2152,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   }

   BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+      masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);

   // Floating point case.
   switch (op_) {
Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index f7ded184ecc386ee47777caf4bd4e079ffcc7a58..f3cdda7ba3a41de5b568a362e8464946f87d9efb 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1007,7 +1007,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {

 static void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
-    Label* slow,
+    Label* gc_required,
BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
     Token::Value op) {

@@ -1116,7 +1116,7 @@ static void BinaryOpStub_GenerateSmiCode(
     }

     if (generate_inline_heapnumber_results) {
-      __ AllocateHeapNumber(rcx, rbx, slow);
+      __ AllocateHeapNumber(rcx, rbx, gc_required);
       Comment perform_float(masm, "-- Perform float operation on smis");
       if (op == Token::SHR) {
         __ SmiToInteger32(left, left);
@@ -1304,7 +1304,7 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {


 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label call_runtime;
+  Label gc_required;
   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
       result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
@@ -1313,15 +1313,15 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
     BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+        masm, &gc_required, ALLOW_HEAPNUMBER_RESULTS, op_);
   }

// Code falls through if the result is not returned as either a smi or heap
   // number.
   GenerateTypeTransition(masm);

-  if (call_runtime.is_linked()) {
-    __ bind(&call_runtime);
+  if (gc_required.is_linked()) {
+    __ bind(&gc_required);
     GenerateRegisterArgsPush(masm);
     GenerateCallRuntime(masm);
   }


--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.


Reply via email to