Revision: 21481
Author:   [email protected]
Date:     Mon May 26 08:13:09 2014 UTC
Log: Skip write barriers in the fast case when setting up local context.

The FastNewContextStub always allocates in new space, so we don't
need to update the write barrier when copying the parameters to
the newly allocated context.
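
The reasoning behind this is the standard generational-GC invariant: a write
barrier only needs to record pointers from old space into new space, so a
store into an object that is itself known to be in new space never needs one.
A minimal, self-contained C++ sketch of that invariant follows (illustrative
only, not V8's actual code; Space, StoreWithBarrier, StoreKnownNewSpace and
the remembered set are made-up names):

  #include <iostream>
  #include <unordered_set>

  enum class Space { kNew, kOld };

  struct Object {
    Space space;
    Object* slot = nullptr;  // single pointer field, standing in for a context slot
  };

  // Remembered set: old-space slots that may point into new space.
  std::unordered_set<Object**> remembered_set;

  // Store 'value' into 'holder->slot', recording the slot when the holder
  // lives in old space and the value in new space (the "write barrier").
  void StoreWithBarrier(Object* holder, Object* value) {
    holder->slot = value;
    if (holder->space == Space::kOld && value->space == Space::kNew) {
      remembered_set.insert(&holder->slot);
    }
  }

  // Fast path used when the holder is known to be freshly allocated in new
  // space, as a context returned by FastNewContextStub is: no barrier at all.
  void StoreKnownNewSpace(Object* holder, Object* value) {
    holder->slot = value;  // a debug build would assert holder->space == Space::kNew
  }

  int main() {
    Object new_context{Space::kNew};
    Object parameter{Space::kNew};
    StoreKnownNewSpace(&new_context, &parameter);  // no remembered-set entry

    Object old_object{Space::kOld};
    StoreWithBarrier(&old_object, &parameter);     // barrier records the slot
    std::cout << "remembered slots: " << remembered_set.size() << "\n";
  }

The "else if (FLAG_debug_code)" blocks added below are the debug-build
analogue of the assertion in StoreKnownNewSpace: they check the new-space
assumption at runtime and abort with kExpectedNewSpaceObject if it ever fails.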

[email protected]

Review URL: https://codereview.chromium.org/297203002
http://code.google.com/p/v8/source/detail?r=21481

Modified:
 /branches/bleeding_edge/src/arm/full-codegen-arm.cc
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
 /branches/bleeding_edge/src/arm64/full-codegen-arm64.cc
 /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc
 /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
 /branches/bleeding_edge/src/objects.h
 /branches/bleeding_edge/src/x64/full-codegen-x64.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc

=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Fri May 16 15:18:24 2014 UTC
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Mon May 26 08:13:09 2014 UTC
@@ -212,6 +212,7 @@
   if (heap_slots > 0) {
     // Argument to NewContext is the function, which is still in r1.
     Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(r1);
       __ Push(info->scope()->GetScopeInfo());
@@ -219,6 +220,8 @@
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ push(r1);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -242,8 +245,15 @@
         __ str(r0, target);

         // Update the write barrier.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
   }
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Thu May 22 08:37:50 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Mon May 26 08:13:09 2014 UTC
@@ -179,10 +179,13 @@
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in r1.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ push(r1);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -205,13 +208,20 @@
         MemOperand target = ContextOperand(cp, var->index());
         __ str(r0, target);
         // Update the write barrier. This clobbers r3 and r0.
-        __ RecordWriteContextSlot(
-            cp,
-            target.offset(),
-            r0,
-            r3,
-            GetLinkRegisterState(),
-            kSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp,
+              target.offset(),
+              r0,
+              r3,
+              GetLinkRegisterState(),
+              kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
     Comment(";;; End allocate local context");
=======================================
--- /branches/bleeding_edge/src/arm64/full-codegen-arm64.cc Fri May 16 15:18:24 2014 UTC
+++ /branches/bleeding_edge/src/arm64/full-codegen-arm64.cc Mon May 26 08:13:09 2014 UTC
@@ -211,6 +211,7 @@
   if (heap_slots > 0) {
     // Argument to NewContext is the function, which is still in x1.
     Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
       __ Push(x1, x10);
@@ -218,6 +219,8 @@
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ Push(x1);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -241,8 +244,15 @@
         __ Str(x10, target);

         // Update the write barrier.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
   }
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Fri May 23 14:06:42 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Mon May 26 08:13:09 2014 UTC
@@ -694,10 +694,13 @@
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in x1.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ Push(x1);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -723,8 +726,15 @@
         MemOperand target = ContextMemOperand(cp, var->index());
         __ Str(value, target);
         // Update the write barrier. This clobbers value and scratch.
-        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
-                                  GetLinkRegisterState(), kSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+                                    GetLinkRegisterState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
     Comment(";;; End allocate local context");
=======================================
--- /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Fri May 16 15:18:24 2014 UTC
+++ /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Mon May 26 08:13:09 2014 UTC
@@ -198,6 +198,7 @@
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(edi);
@@ -206,6 +207,8 @@
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ push(edi);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -229,11 +232,18 @@
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
-        __ RecordWriteContextSlot(esi,
-                                  context_offset,
-                                  eax,
-                                  ebx,
-                                  kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(esi,
+                                    context_offset,
+                                    eax,
+                                    ebx,
+                                    kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
   }
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Mon May 26 06:41:21 2014 UTC
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Mon May 26 08:13:09 2014 UTC
@@ -254,10 +254,13 @@
   int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ push(edi);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -281,11 +284,18 @@
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
-        __ RecordWriteContextSlot(esi,
-                                  context_offset,
-                                  eax,
-                                  ebx,
-                                  kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(esi,
+                                    context_offset,
+                                    eax,
+                                    ebx,
+                                    kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
     Comment(";;; End allocate local context");
=======================================
--- /branches/bleeding_edge/src/objects.h       Fri May 23 12:55:57 2014 UTC
+++ /branches/bleeding_edge/src/objects.h       Mon May 26 08:13:09 2014 UTC
@@ -1071,6 +1071,7 @@
"Expected fixed array in register r2") \ V(kExpectedFixedArrayInRegisterRbx, \ "Expected fixed array in register rbx") \ + V(kExpectedNewSpaceObject, "Expected new space object") \ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \ V(kExpectedUndefinedOrCell, \ "Expected undefined or cell in register") \
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc Fri May 16 15:18:24 2014 UTC
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc Mon May 26 08:13:09 2014 UTC
@@ -199,6 +199,7 @@
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in rdi.
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ Push(rdi);
@@ -207,6 +208,8 @@
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ Push(rdi);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -230,8 +233,15 @@
         int context_offset = Context::SlotOffset(var->index());
         __ movp(Operand(rsi, context_offset), rax);
         // Update the write barrier.  This clobbers rax and rbx.
-        __ RecordWriteContextSlot(
-            rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
   }
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Mon May 26 06:41:21 2014 UTC
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Mon May 26 08:13:09 2014 UTC
@@ -191,10 +191,13 @@
   int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in rdi.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ Push(rdi);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -218,7 +221,14 @@
         int context_offset = Context::SlotOffset(var->index());
         __ movp(Operand(rsi, context_offset), rax);
         // Update the write barrier. This clobbers rax and rbx.
-        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
       }
     }
     Comment(";;; End allocate local context");
