Revision: 10105
Author:   [email protected]
Date:     Thu Dec  1 00:22:35 2011
Log:      Version 3.7.12

Increase tick interval for the Android platform.

Fix a bug in the register allocator. (chromium:105112)

Fix handling of recompiling code. (chromium:105375, v8:1782)

Start incremental marking on idle notification. (v8:1458)

Build fixes for various platforms.

Various performance improvements.
http://code.google.com/p/v8/source/detail?r=10105

Added:
 /trunk/test/mjsunit/debug-break-inline.js
Modified:
 /trunk/ChangeLog
 /trunk/include/v8.h
 /trunk/src/api.cc
 /trunk/src/debug.cc
 /trunk/src/full-codegen.cc
 /trunk/src/full-codegen.h
 /trunk/src/heap.cc
 /trunk/src/heap.h
 /trunk/src/incremental-marking.cc
 /trunk/src/incremental-marking.h
 /trunk/src/lithium-allocator.cc
 /trunk/src/log.h
 /trunk/src/mark-compact.cc
 /trunk/src/mips/code-stubs-mips.cc
 /trunk/src/mips/full-codegen-mips.cc
 /trunk/src/mips/lithium-codegen-mips.cc
 /trunk/src/mips/lithium-codegen-mips.h
 /trunk/src/objects.cc
 /trunk/src/parser.cc
 /trunk/src/spaces.cc
 /trunk/src/spaces.h
 /trunk/src/v8.cc
 /trunk/src/v8.h
 /trunk/src/version.cc
 /trunk/test/cctest/test-api.cc
 /trunk/test/cctest/test-heap.cc
 /trunk/test/cctest/test-mark-compact.cc
 /trunk/test/mjsunit/debug-step-3.js

=======================================
--- /dev/null
+++ /trunk/test/mjsunit/debug-break-inline.js   Thu Dec  1 00:22:35 2011
@@ -0,0 +1,100 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Tests that deoptimization due to debug breaks works for inlined
+// functions where the full code is generated before the debugger is
+// attached.
+//
+// See http://code.google.com/p/chromium/issues/detail?id=105375
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var count = 0;
+var break_count = 0;
+
+// Debug event listener which sets a breakpoint the first time it is
+// hit, and otherwise counts the breakpoints hit and checks that the
+// expected state is reached.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    break_count++;
+    if (break_count == 1) {
+      Debug.setBreakPoint(g, 3);
+
+      for (var i = 0; i < exec_state.frameCount(); i++) {
+        var frame = exec_state.frame(i);
+        // When function f is optimized (1 means YES, see runtime.cc) we
+        // expect an optimized frame for f and g.
+        if (%GetOptimizationStatus(f) == 1) {
+          if (i == 1) {
+            assertTrue(frame.isOptimizedFrame());
+            assertTrue(frame.isInlinedFrame());
+            assertEquals(4 - i, frame.inlinedFrameIndex());
+          } else if (i == 2) {
+            assertTrue(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          } else {
+            assertFalse(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          }
+        }
+      }
+    }
+  }
+}
+
+function f() {
+  g();
+}
+
+function g() {
+  count++;
+  h();
+  var b = 1;  // Break point is set here.
+}
+
+function h() {
+  debugger;
+}
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+f();
+
+assertEquals(5, count);
+assertEquals(2, break_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
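
The new test is gated on the flags in its header comment. To run it standalone, one would pass those flags and the mjsunit harness explicitly (assuming a d8 shell build; the exact paths are illustrative):

  $ d8 --expose-debug-as debug --allow-natives-syntax \
      test/mjsunit/mjsunit.js test/mjsunit/debug-break-inline.js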
=======================================
--- /trunk/ChangeLog    Tue Nov 29 06:28:56 2011
+++ /trunk/ChangeLog    Thu Dec  1 00:22:35 2011
@@ -1,3 +1,18 @@
+2011-12-01: Version 3.7.12
+
        Increase tick interval for the Android platform.
+
+        Fix a bug in the register allocator. (chromium:105112)
+
+        Fix handling of recompiling code. (chromium:105375, v8:1782)
+
+        Start incremental marking on idle notification. (v8:1458)
+
+        Build fixes for various platforms.
+
+        Various performance improvements.
+
+
 2011-11-29: Version 3.7.11

         Fixed bug when generating padding to ensure space for lazy
=======================================
--- /trunk/include/v8.h Tue Nov 29 06:28:56 2011
+++ /trunk/include/v8.h Thu Dec  1 00:22:35 2011
@@ -3194,8 +3194,12 @@
    * Returns true if the embedder should stop calling IdleNotification
    * until real work has been done.  This indicates that V8 has done
    * as much cleanup as it will be able to do.
+   *
+   * The hint argument specifies the amount of work to be done in the
+   * function on a scale from 1 to 1000. There is no guarantee that the
+   * actual work will match the hint.
    */
-  static bool IdleNotification();
+  static bool IdleNotification(int hint = 1000);

   /**
    * Optional notification that the system is running low on memory.
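
For embedders, the hint slots into the usual message-loop idle hook: keep handing V8 small work quanta until it reports that no useful idle work remains. A minimal sketch (the hook name, loop bound, and hint value are illustrative choices, not part of this change):

  #include <v8.h>

  // Hypothetical embedder idle hook.  IdleNotification(hint) returns true
  // once V8 has done as much cleanup as it can, so we stop calling it.
  void OnEmbedderIdle() {
    static const int kSmallHint = 100;  // modest chunk of work per call
    for (int i = 0; i < 10; i++) {
      if (v8::V8::IdleNotification(kSmallHint)) break;  // nothing left to do
    }
  }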
=======================================
--- /trunk/src/api.cc   Tue Nov 29 06:28:56 2011
+++ /trunk/src/api.cc   Thu Dec  1 00:22:35 2011
@@ -4020,12 +4020,12 @@
 }


-bool v8::V8::IdleNotification() {
+bool v8::V8::IdleNotification(int hint) {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
   i::Isolate* isolate = i::Isolate::Current();
   if (isolate == NULL || !isolate->IsInitialized()) return true;
-  return i::V8::IdleNotification();
+  return i::V8::IdleNotification(hint);
 }


=======================================
--- /trunk/src/debug.cc Thu Nov 10 03:38:15 2011
+++ /trunk/src/debug.cc Thu Dec  1 00:22:35 2011
@@ -1780,17 +1780,29 @@
       // values and performing a heap iteration.
       AssertNoAllocation no_allocation;

-      // Find all non-optimized code functions with activation frames on
-      // the stack.
+      // Find all non-optimized code functions with activation frames
+      // on the stack. This includes functions which have optimized
+      // activations (including inlined functions) on the stack as the
+      // non-optimized code is needed for the lazy deoptimization.
      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
         JavaScriptFrame* frame = it.frame();
-        if (frame->function()->IsJSFunction()) {
+        if (frame->is_optimized()) {
+          List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+          frame->GetFunctions(&functions);
+          for (int i = 0; i < functions.length(); i++) {
+            if (!functions[i]->shared()->code()->has_debug_break_slots()) {
+              active_functions.Add(Handle<JSFunction>(functions[i]));
+            }
+          }
+        } else if (frame->function()->IsJSFunction()) {
           JSFunction* function = JSFunction::cast(frame->function());
           if (function->code()->kind() == Code::FUNCTION &&
-              !function->code()->has_debug_break_slots())
+              !function->code()->has_debug_break_slots()) {
             active_functions.Add(Handle<JSFunction>(function));
+          }
         }
       }
+
       // Sort the functions on the object pointer value to prepare for
       // the binary search below.
       active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
@@ -1838,6 +1850,9 @@

       // Make sure that the shared full code is compiled with debug
       // break slots.
+      if (function->code() == *lazy_compile) {
+        function->set_code(shared->code());
+      }
       Handle<Code> current_code(function->code());
       if (shared->code()->has_debug_break_slots()) {
         // if the code is already recompiled to have break slots skip
@@ -1862,7 +1877,7 @@
       }
       Handle<Code> new_code(shared->code());

-      // Find the function and patch return address.
+      // Find the function and patch the return address.
      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
         JavaScriptFrame* frame = it.frame();
         // If the current frame is for this function in its
=======================================
--- /trunk/src/full-codegen.cc  Tue Nov 29 06:28:56 2011
+++ /trunk/src/full-codegen.cc  Thu Dec  1 00:22:35 2011
@@ -362,7 +362,7 @@
 }


-void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
   // There's no need to prepare this code for bailouts from already optimized
   // code or code that can't be optimized.
   if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
@@ -383,10 +383,11 @@
 }


-void FullCodeGenerator::RecordStackCheck(int ast_id) {
+void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
   // The pc offset does not need to be encoded and packed together with a
   // state.
-  BailoutEntry entry = { ast_id, masm_->pc_offset() };
+  ASSERT(masm_->pc_offset() > 0);
+  BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
   stack_checks_.Add(entry);
 }

=======================================
--- /trunk/src/full-codegen.h   Tue Nov 29 06:28:56 2011
+++ /trunk/src/full-codegen.h   Thu Dec  1 00:22:35 2011
@@ -390,7 +390,7 @@

   // Bailout support.
   void PrepareForBailout(Expression* node, State state);
-  void PrepareForBailoutForId(int id, State state);
+  void PrepareForBailoutForId(unsigned id, State state);

   // Record a call's return site offset, used to rebuild the frame if the
   // called function was inlined at the site.
@@ -417,7 +417,7 @@
   // a loop.
   void EmitStackCheck(IterationStatement* stmt);
   // Record the OSR AST id corresponding to a stack check in the code.
-  void RecordStackCheck(int osr_ast_id);
+  void RecordStackCheck(unsigned osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream.  Return
   // the offset of the start of the table.
   unsigned EmitStackCheckTable();
=======================================
--- /trunk/src/heap.cc  Tue Nov 29 06:28:56 2011
+++ /trunk/src/heap.cc  Thu Dec  1 00:22:35 2011
@@ -144,6 +144,11 @@
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
+      idle_notification_will_schedule_next_gc_(false),
+      mark_sweeps_since_idle_round_started_(0),
+      ms_count_at_last_idle_notification_(0),
+      gc_count_at_last_idle_gc_(0),
+      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
       promotion_queue_(this),
       configured_(false),
       chunks_queued_for_free_(NULL) {
@@ -1081,8 +1086,7 @@

   incremental_marking()->PrepareForScavenge();

-  old_pointer_space()->AdvanceSweeper(new_space_.Size());
-  old_data_space()->AdvanceSweeper(new_space_.Size());
+  AdvanceSweepers(static_cast<int>(new_space_.Size()));

   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
@@ -1171,6 +1175,8 @@
   LOG(isolate_, ResourceEvent("scavenge", "end"));

   gc_state_ = NOT_IN_GC;
+
+  scavenges_since_last_idle_round_++;
 }


@@ -4534,7 +4540,80 @@
 }


-bool Heap::IdleNotification() {
+bool Heap::IdleNotification(int hint) {
+  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
+    return hint < 1000 ? true : IdleGlobalGC();
+  }
+
+  // By doing small chunks of GC work in each IdleNotification,
+  // perform a round of incremental GCs and after that wait until
+  // the mutator creates enough garbage to justify a new round.
+  // An incremental GC progresses as follows:
+  // 1. many incremental marking steps,
+  // 2. one old space mark-sweep-compact,
+  // 3. many lazy sweep steps.
+  // Use mark-sweep-compact events to count incremental GCs in a round.
+
+  intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+  // The size factor is in range [3..100].
+  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
+  if (incremental_marking()->IsStopped()) {
+    if (!IsSweepingComplete() &&
+        !AdvanceSweepers(static_cast<int>(step_size))) {
+      return false;
+    }
+  }
+
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+    if (EnoughGarbageSinceLastIdleRound()) {
+      StartIdleRound();
+    } else {
+      return true;
+    }
+  }
+
+  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
+  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
+  ms_count_at_last_idle_notification_ = ms_count_;
+
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+    FinishIdleRound();
+    return true;
+  }
+
+  if (incremental_marking()->IsStopped()) {
+    if (hint < 1000 && !WorthStartingGCWhenIdle()) {
+      FinishIdleRound();
+      return true;
+    }
+    incremental_marking()->Start();
+  }
+
+  // This flag prevents incremental marking from requesting a GC via the
+  // stack guard.
+  idle_notification_will_schedule_next_gc_ = true;
+  incremental_marking()->Step(step_size);
+  idle_notification_will_schedule_next_gc_ = false;
+
+  if (incremental_marking()->IsComplete()) {
+    bool uncommit = false;
+    if (gc_count_at_last_idle_gc_ == gc_count_) {
+      // No GC since the last full GC, the mutator is probably not active.
+      isolate_->compilation_cache()->Clear();
+      uncommit = true;
+    }
+    CollectAllGarbage(kNoGCFlags);
+    gc_count_at_last_idle_gc_ = gc_count_;
+    if (uncommit) {
+      new_space_.Shrink();
+      UncommitFromSpace();
+    }
+  }
+  return false;
+}
+
+
+bool Heap::IdleGlobalGC() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
   static const int kIdlesBeforeMarkCompact = 8;
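
To make the hint-to-step-size mapping above concrete, here is a standalone sketch of the clamping arithmetic. The kAllocatedThreshold value below is an assumption for the example only; the real constant lives in incremental-marking.h:

  #include <cstdio>

  int main() {
    static const long kAllocatedThreshold = 65536;  // assumed for this example
    static const int hints[] = { 1, 10, 30, 500, 1000 };
    for (int i = 0; i < 5; i++) {
      int hint = hints[i];
      // Min(Max(hint, 30), 1000) / 10, as in Heap::IdleNotification.
      int clamped = hint < 30 ? 30 : (hint > 1000 ? 1000 : hint);
      long size_factor = clamped / 10;  // always in [3..100]
      std::printf("hint=%4d -> size_factor=%3ld -> step_size=%ld\n",
                  hint, size_factor, size_factor * kAllocatedThreshold);
    }
    return 0;
  }

So a tiny hint of 10 still performs a minimum-sized marking step (factor 3), while the default hint of 1000 yields the maximum step (factor 100).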
=======================================
--- /trunk/src/heap.h   Tue Nov 29 06:28:56 2011
+++ /trunk/src/heap.h   Thu Dec  1 00:22:35 2011
@@ -1331,8 +1331,8 @@
     return Min(limit, halfway_to_the_max);
   }

-  // Can be called when the embedding application is idle.
-  bool IdleNotification();
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(int hint);

   // Declare all the root indices.
   enum RootListIndex {
@@ -1454,6 +1454,17 @@
   IncrementalMarking* incremental_marking() {
     return &incremental_marking_;
   }
+
+  bool IsSweepingComplete() {
+    return old_data_space()->IsSweepingComplete() &&
+           old_pointer_space()->IsSweepingComplete();
+  }
+
+  bool AdvanceSweepers(int step_size) {
+    bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+    sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+    return sweeping_complete;
+  }

   ExternalStringTable* external_string_table() {
     return &external_string_table_;
@@ -1490,6 +1501,10 @@
   // The roots that have an index less than this are always in old space.
   static const int kOldSpaceRoots = 0x20;

+  bool idle_notification_will_schedule_next_gc() {
+    return idle_notification_will_schedule_next_gc_;
+  }
+
  private:
   Heap();

@@ -1823,6 +1838,30 @@

   void SelectScavengingVisitorsTable();

+  void StartIdleRound() {
+    mark_sweeps_since_idle_round_started_ = 0;
+    ms_count_at_last_idle_notification_ = ms_count_;
+  }
+
+  void FinishIdleRound() {
+    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
+    scavenges_since_last_idle_round_ = 0;
+  }
+
+  bool EnoughGarbageSinceLastIdleRound() {
+    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
+  }
+
+  bool WorthStartingGCWhenIdle() {
+    if (contexts_disposed_ > 0) {
+      return true;
+    }
+    return incremental_marking()->WorthActivating();
+  }
+
+  // Returns true if no more GC work is left.
+  bool IdleGlobalGC();
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;

@@ -1852,6 +1891,15 @@
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;

+  bool idle_notification_will_schedule_next_gc_;
+  int mark_sweeps_since_idle_round_started_;
+  int ms_count_at_last_idle_notification_;
+  unsigned int gc_count_at_last_idle_gc_;
+  int scavenges_since_last_idle_round_;
+
+  static const int kMaxMarkSweepsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
   // Shared state read by the scavenge collector and set by ScavengeObject.
   PromotionQueue promotion_queue_;

=======================================
--- /trunk/src/incremental-marking.cc   Thu Nov 17 00:34:43 2011
+++ /trunk/src/incremental-marking.cc   Thu Dec  1 00:22:35 2011
@@ -743,7 +743,9 @@
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
-  heap_->isolate()->stack_guard()->RequestGC();
+  if (!heap_->idle_notification_will_schedule_next_gc()) {
+    heap_->isolate()->stack_guard()->RequestGC();
+  }
 }


@@ -771,8 +773,7 @@
   }

   if (state_ == SWEEPING) {
-    if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
-        heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
       bytes_scanned_ = 0;
       StartMarking(PREVENT_COMPACTION);
     }
=======================================
--- /trunk/src/incremental-marking.h    Thu Nov 10 03:38:15 2011
+++ /trunk/src/incremental-marking.h    Thu Dec  1 00:22:35 2011
@@ -62,6 +62,8 @@
   INLINE(bool IsMarking()) { return state() >= MARKING; }

   inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  inline bool IsComplete() { return state() == COMPLETE; }

   bool WorthActivating();

@@ -101,6 +103,7 @@
   void OldSpaceStep(intptr_t allocated) {
     Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
   }
+
   void Step(intptr_t allocated);

   inline void RestartIfNotMarking() {
=======================================
--- /trunk/src/lithium-allocator.cc     Fri Nov 11 04:00:53 2011
+++ /trunk/src/lithium-allocator.cc     Thu Dec  1 00:22:35 2011
@@ -234,7 +234,8 @@
   // at the current or the immediate next position.
   UsePosition* use_pos = NextRegisterPosition(pos);
   if (use_pos == NULL) return true;
-  return use_pos->pos().Value() > pos.NextInstruction().Value();
+  return
+      use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
 }


=======================================
--- /trunk/src/log.h    Wed Oct  5 14:44:48 2011
+++ /trunk/src/log.h    Thu Dec  1 00:22:35 2011
@@ -295,7 +295,13 @@
   INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));

   // Profiler's sampling interval (in milliseconds).
+#if defined(ANDROID)
+  // Phones and tablets have processors that are much slower than desktop
+  // and laptop computers for which current heuristics are tuned.
+  static const int kSamplingIntervalMs = 5;
+#else
   static const int kSamplingIntervalMs = 1;
+#endif

   // Callback from Log, stops profiling in case of insufficient resources.
   void LogFailure();
=======================================
--- /trunk/src/mark-compact.cc  Tue Nov 29 06:28:56 2011
+++ /trunk/src/mark-compact.cc  Thu Dec  1 00:22:35 2011
@@ -2582,6 +2582,10 @@


 void MarkCompactCollector::EvacuateNewSpace() {
+  // There are soft limits in the allocation code, designed to trigger a mark
+  // sweep collection by failing allocations.  But since we are already in
+  // a mark-sweep allocation, there is no sense in trying to trigger one.
+  AlwaysAllocateScope scope;
   heap()->CheckNewSpaceExpansionCriteria();

   NewSpace* new_space = heap()->new_space();
=======================================
--- /trunk/src/mips/code-stubs-mips.cc  Tue Nov 29 06:28:56 2011
+++ /trunk/src/mips/code-stubs-mips.cc  Thu Dec  1 00:22:35 2011
@@ -3360,6 +3360,9 @@
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(
+        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into v0.
       __ Drop(1);
@@ -3372,6 +3375,9 @@
   }  // if (CpuFeatures::IsSupported(FPU))

   __ bind(&calculate);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(
+      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
@@ -3455,20 +3461,25 @@
     __ mov_d(f12, f4);
   }
   AllowExternalCallThatCantCauseGC scope(masm);
+  Isolate* isolate = masm->isolate();
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()),
+          ExternalReference::math_sin_double_function(isolate),
           0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()),
+          ExternalReference::math_cos_double_function(isolate),
+          0, 1);
+      break;
+    case TranscendentalCache::TAN:
+      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
           0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()),
+          ExternalReference::math_log_double_function(isolate),
           0, 1);
       break;
     default:
@@ -3484,6 +3495,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -4746,7 +4758,8 @@
   Label seq_string;
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  // First check for flat string.
+  // First check for flat string. None of the following string type tests will
+  // succeed if kIsNotStringTag is set.
   __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
@@ -4754,6 +4767,7 @@
   // subject: Subject string
   // a0: instance type of Subject string
   // regexp_data: RegExp data (FixedArray)
+  // a1: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
@@ -4763,9 +4777,15 @@
   Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
   __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));

+  // Catch non-string subject (should already have been guarded against).
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ And(at, a1, Operand(kIsNotStringMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+
   // String is sliced.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ sra(t0, t0, kSmiTagSize);
=======================================
--- /trunk/src/mips/full-codegen-mips.cc        Tue Nov 29 06:28:56 2011
+++ /trunk/src/mips/full-codegen-mips.cc        Thu Dec  1 00:22:35 2011
@@ -3199,6 +3199,19 @@
   __ CallStub(&stub);
   context()->Plug(v0);
 }
+
+
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}


 void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
=======================================
--- /trunk/src/mips/lithium-codegen-mips.cc     Tue Nov 29 06:28:56 2011
+++ /trunk/src/mips/lithium-codegen-mips.cc     Thu Dec  1 00:22:35 2011
@@ -3068,6 +3068,14 @@
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
+
+
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}


 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
@@ -3109,6 +3117,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
=======================================
--- /trunk/src/mips/lithium-codegen-mips.h      Tue Nov 29 06:28:56 2011
+++ /trunk/src/mips/lithium-codegen-mips.h      Thu Dec  1 00:22:35 2011
@@ -242,6 +242,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);

=======================================
--- /trunk/src/objects.cc       Tue Nov 29 06:28:56 2011
+++ /trunk/src/objects.cc       Thu Dec  1 00:22:35 2011
@@ -926,7 +926,7 @@
                     len - first_length);
       }
       cs->set_first(result);
-      cs->set_second(heap->empty_string());
+      cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
       return result;
     }
     default:
=======================================
--- /trunk/src/parser.cc        Tue Nov 29 06:28:56 2011
+++ /trunk/src/parser.cc        Thu Dec  1 00:22:35 2011
@@ -3844,9 +3844,11 @@
     ObjectLiteral::Property* property =
         new(zone()) ObjectLiteral::Property(key, value);

-    // Mark object literals that contain function literals and pretenure the
-    // literal so it can be added as a constant function property.
-    if (value->AsFunctionLiteral() != NULL) {
+    // Mark top-level object literals that contain function literals and
+    // pretenure the literal so it can be added as a constant function
+    // property.
+    if (top_scope_->DeclarationScope()->is_global_scope() &&
+        value->AsFunctionLiteral() != NULL) {
       has_function = true;
       value->AsFunctionLiteral()->set_pretenure();
     }
=======================================
--- /trunk/src/spaces.cc        Tue Nov 29 06:28:56 2011
+++ /trunk/src/spaces.cc        Thu Dec  1 00:22:35 2011
@@ -2142,35 +2142,38 @@
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

-  // Free list allocation failed and there is no next page. Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
-  }
-
-  // If there are unswept pages advance lazy sweeper.
-  // If there are unswept pages, advance the lazy sweeper one page before
-  // allocating a new page.
   if (first_unswept_page_->is_valid()) {
     AdvanceSweeper(size_in_bytes);

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
-
-    if (!IsSweepingComplete()) {
-      AdvanceSweeper(kMaxInt);
-
-      // Retry the free list allocation.
-      object = free_list_.Allocate(size_in_bytes);
-      if (object != NULL) return object;
-    }
+  }
+
+  // Free list allocation failed and there is no next page. Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    return NULL;
   }

   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
     return free_list_.Allocate(size_in_bytes);
   }
+
+  // Last ditch: sweep all the remaining pages to try to find space. This may
+  // cause a pause.
+  if (!IsSweepingComplete()) {
+    AdvanceSweeper(kMaxInt);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+  }

   // Finally, fail.
   return NULL;
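
The net effect of this reordering is a new fallback sequence for old-space allocation. A toy restatement with stubbed-out predicates (the names mirror the patch, but everything here is a sketch, not the real classes):

  #include <cstddef>

  struct ToySpace {
    bool sweeping_in_progress;     // first_unswept_page_->is_valid()
    bool old_gen_limit_reached;    // OldGenerationAllocationLimitReached()
    bool always_allocate;          // heap()->always_allocate()
    bool can_expand;               // Expand()

    void* TryFreeList(std::size_t) { return NULL; }  // stub: free list retry
    void AdvanceSweeper(std::size_t) {}              // stub: sweep ~one page
    void FinishSweeping() {}                         // stub: sweep everything

    void* SlowAllocate(std::size_t size) {
      if (sweeping_in_progress) {        // 1. sweep a little, then retry
        AdvanceSweeper(size);
        if (void* obj = TryFreeList(size)) return obj;
      }
      if (!always_allocate && old_gen_limit_reached) {
        return NULL;                     // 2. fail so a GC can be triggered
      }
      if (can_expand) {                  // 3. allocate in a fresh page
        return TryFreeList(size);
      }
      if (sweeping_in_progress) {        // 4. last ditch: finish sweeping
        FinishSweeping();                //    (may pause), then retry
        if (void* obj = TryFreeList(size)) return obj;
      }
      return NULL;                       // 5. finally, fail
    }
  };

Compared with the old code, the old-generation limit check now runs after the cheap one-page sweep, so an allocation that lazy sweeping can satisfy no longer fails early just because the limit was hit.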
=======================================
--- /trunk/src/spaces.h Tue Nov 29 06:28:56 2011
+++ /trunk/src/spaces.h Thu Dec  1 00:22:35 2011
@@ -1650,7 +1650,8 @@
   Page* first_unswept_page_;

   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS.
+  // it cannot allocate the requested number of pages from the OS, or if the
+  // hard heap size limit has been hit.
   bool Expand();

   // Generic fast case allocation function that tries linear allocation at the
=======================================
--- /trunk/src/v8.cc    Thu Oct 27 00:38:48 2011
+++ /trunk/src/v8.cc    Thu Dec  1 00:22:35 2011
@@ -166,13 +166,13 @@
 }


-bool V8::IdleNotification() {
+bool V8::IdleNotification(int hint) {
   // Returning true tells the caller that there is no need to call
   // IdleNotification again.
   if (!FLAG_use_idle_notification) return true;

   // Tell the heap that it may want to adjust.
-  return HEAP->IdleNotification();
+  return HEAP->IdleNotification(hint);
 }


=======================================
--- /trunk/src/v8.h     Thu Oct 27 00:38:48 2011
+++ /trunk/src/v8.h     Thu Dec  1 00:22:35 2011
@@ -106,7 +106,7 @@
                                           Context* context);

   // Idle notification directly from the API.
-  static bool IdleNotification();
+  static bool IdleNotification(int hint);

  private:
   static void InitializeOncePerProcess();
=======================================
--- /trunk/src/version.cc       Tue Nov 29 06:28:56 2011
+++ /trunk/src/version.cc       Thu Dec  1 00:22:35 2011
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     7
-#define BUILD_NUMBER      11
+#define BUILD_NUMBER      12
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/test/cctest/test-api.cc      Tue Nov 29 06:28:56 2011
+++ /trunk/test/cctest/test-api.cc      Thu Dec  1 00:22:35 2011
@@ -13439,13 +13439,60 @@

 // Test that idle notification can be handled and eventually returns true.
 THREADED_TEST(IdleNotification) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun("function binom(n, m) {"
+             "  var C = [[1]];"
+             "  for (var i = 1; i <= n; ++i) {"
+             "    C[i] = [1];"
+             "    for (var j = 1; j < i; ++j) {"
+             "      C[i][j] = C[i-1][j-1] + C[i-1][j];"
+             "    }"
+             "    C[i][i] = 1;"
+             "  }"
+             "  return C[n][m];"
+             "};"
+             "binom(1000, 500)");
   bool rv = false;
+  intptr_t old_size = HEAP->SizeOfObjects();
+  bool no_idle_work = v8::V8::IdleNotification();
   for (int i = 0; i < 100; i++) {
     rv = v8::V8::IdleNotification();
     if (rv)
       break;
   }
   CHECK(rv == true);
+  intptr_t new_size = HEAP->SizeOfObjects();
+  CHECK(no_idle_work || new_size < 3 * old_size / 4);
+}
+
+// Test that idle notification with a hint eventually returns true.
+THREADED_TEST(IdleNotificationWithHint) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun("function binom(n, m) {"
+             "  var C = [[1]];"
+             "  for (var i = 1; i <= n; ++i) {"
+             "    C[i] = [1];"
+             "    for (var j = 1; j < i; ++j) {"
+             "      C[i][j] = C[i-1][j-1] + C[i-1][j];"
+             "    }"
+             "    C[i][i] = 1;"
+             "  }"
+             "  return C[n][m];"
+             "};"
+             "binom(1000, 500)");
+  bool rv = false;
+  intptr_t old_size = HEAP->SizeOfObjects();
+  bool no_idle_work = v8::V8::IdleNotification(10);
+  for (int i = 0; i < 200; i++) {
+    rv = v8::V8::IdleNotification(10);
+    if (rv)
+      break;
+  }
+  CHECK(rv == true);
+  intptr_t new_size = HEAP->SizeOfObjects();
+  CHECK(no_idle_work || new_size < 3 * old_size / 4);
 }


=======================================
--- /trunk/test/cctest/test-heap.cc     Thu Nov 10 03:38:15 2011
+++ /trunk/test/cctest/test-heap.cc     Thu Dec  1 00:22:35 2011
@@ -1289,3 +1289,31 @@
   new_capacity = new_space->Capacity();
   CHECK(old_capacity == new_capacity);
 }
+
+
+TEST(IdleNotificationAdvancesIncrementalMarking) {
+  if (!FLAG_incremental_marking || !FLAG_incremental_marking_steps) return;
+  InitializeVM();
+  v8::HandleScope scope;
+  const char* source = "function binom(n, m) {"
+                       "  var C = [[1]];"
+                       "  for (var i = 1; i <= n; ++i) {"
+                       "    C[i] = [1];"
+                       "    for (var j = 1; j < i; ++j) {"
+                       "      C[i][j] = C[i-1][j-1] + C[i-1][j];"
+                       "    }"
+                       "    C[i][i] = 1;"
+                       "  }"
+                       "  return C[n][m];"
+                       "};"
+                       "binom(1000, 500)";
+  {
+    AlwaysAllocateScope aa_scope;
+    CompileRun(source);
+  }
+  intptr_t old_size = HEAP->SizeOfObjects();
+  bool no_idle_work = v8::V8::IdleNotification();
+  while (!v8::V8::IdleNotification()) ;
+  intptr_t new_size = HEAP->SizeOfObjects();
+  CHECK(no_idle_work || new_size < 3 * old_size / 4);
+}
=======================================
--- /trunk/test/cctest/test-mark-compact.cc     Wed Oct  5 14:44:48 2011
+++ /trunk/test/cctest/test-mark-compact.cc     Thu Dec  1 00:22:35 2011
@@ -142,9 +142,6 @@

   // Call mark compact GC, and it should pass.
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
-
-  // array should not be promoted because the old space is full.
-  CHECK(HEAP->InSpace(*array, NEW_SPACE));
 }


=======================================
--- /trunk/test/mjsunit/debug-step-3.js Thu Oct 27 00:38:48 2011
+++ /trunk/test/mjsunit/debug-step-3.js Thu Dec  1 00:22:35 2011
@@ -82,8 +82,7 @@
 // Step through the function ensuring that the var statements are hit as well.
 prepare_step_test();
 f();
-// TODO(1782): Fix issue to bring back this assert.
-//assertEquals(4, step_count);
+assertEquals(4, step_count);

 // Clear the breakpoint and check that no stepping happens.
 Debug.clearBreakPoint(bp);
