Revision: 25034
Author:   [email protected]
Date:     Fri Oct 31 13:19:44 2014 UTC
Log: Version 3.30.23 (based on 4a679c828899ac37630b1748caface5b816da87c)

Introduce v8::Exception::GetMessage to find location of an error object (Chromium issue 427954).

Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=25034

Added:
 /trunk/test/mjsunit/asm/math-ceil.js
 /trunk/test/mjsunit/asm/math-floor.js
 /trunk/test/mjsunit/asm/uint32-less-than-shift.js
 /trunk/test/mjsunit/debug-step-turbofan.js
 /trunk/test/mjsunit/regress/regress-crbug-410033.js
 /trunk/test/mjsunit/serialize-embedded-error.js
Deleted:
 /trunk/src/compiler/node-matchers.cc
Modified:
 /trunk/BUILD.gn
 /trunk/ChangeLog
 /trunk/Makefile
 /trunk/include/v8.h
 /trunk/src/api.cc
 /trunk/src/api.h
 /trunk/src/arm/assembler-arm.cc
 /trunk/src/arm/assembler-arm.h
 /trunk/src/arm/constants-arm.h
 /trunk/src/arm/disasm-arm.cc
 /trunk/src/arm/simulator-arm.cc
 /trunk/src/ast-value-factory.cc
 /trunk/src/ast-value-factory.h
 /trunk/src/ast.h
 /trunk/src/compiler/arm/code-generator-arm.cc
 /trunk/src/compiler/arm/instruction-codes-arm.h
 /trunk/src/compiler/arm/instruction-selector-arm.cc
 /trunk/src/compiler/arm/linkage-arm.cc
 /trunk/src/compiler/arm64/code-generator-arm64.cc
 /trunk/src/compiler/arm64/instruction-codes-arm64.h
 /trunk/src/compiler/arm64/instruction-selector-arm64.cc
 /trunk/src/compiler/arm64/linkage-arm64.cc
 /trunk/src/compiler/change-lowering.cc
 /trunk/src/compiler/code-generator-impl.h
 /trunk/src/compiler/code-generator.h
 /trunk/src/compiler/common-operator.cc
 /trunk/src/compiler/graph-visualizer.cc
 /trunk/src/compiler/ia32/code-generator-ia32.cc
 /trunk/src/compiler/ia32/instruction-codes-ia32.h
 /trunk/src/compiler/ia32/instruction-selector-ia32.cc
 /trunk/src/compiler/ia32/linkage-ia32.cc
 /trunk/src/compiler/instruction-selector-impl.h
 /trunk/src/compiler/instruction-selector.cc
 /trunk/src/compiler/instruction-selector.h
 /trunk/src/compiler/instruction.cc
 /trunk/src/compiler/instruction.h
 /trunk/src/compiler/js-builtin-reducer.cc
 /trunk/src/compiler/js-builtin-reducer.h
 /trunk/src/compiler/js-graph.cc
 /trunk/src/compiler/linkage-impl.h
 /trunk/src/compiler/linkage.cc
 /trunk/src/compiler/linkage.h
 /trunk/src/compiler/machine-operator-reducer.cc
 /trunk/src/compiler/machine-type.h
 /trunk/src/compiler/mips/instruction-selector-mips.cc
 /trunk/src/compiler/mips/linkage-mips.cc
 /trunk/src/compiler/node-matchers.h
 /trunk/src/compiler/pipeline.cc
 /trunk/src/compiler/raw-machine-assembler.cc
 /trunk/src/compiler/raw-machine-assembler.h
 /trunk/src/compiler/register-allocator.cc
 /trunk/src/compiler/register-allocator.h
 /trunk/src/compiler/simplified-lowering.cc
 /trunk/src/compiler/x64/code-generator-x64.cc
 /trunk/src/compiler/x64/instruction-codes-x64.h
 /trunk/src/compiler/x64/instruction-selector-x64.cc
 /trunk/src/compiler/x64/linkage-x64.cc
 /trunk/src/compiler.cc
 /trunk/src/interface-descriptors.cc
 /trunk/src/interface-descriptors.h
 /trunk/src/isolate.cc
 /trunk/src/isolate.h
 /trunk/src/messages.js
 /trunk/src/objects-printer.cc
 /trunk/src/parser.cc
 /trunk/src/runtime/runtime-debug.cc
 /trunk/src/version.cc
 /trunk/test/cctest/compiler/codegen-tester.h
 /trunk/test/cctest/compiler/test-codegen-deopt.cc
 /trunk/test/cctest/compiler/test-instruction.cc
 /trunk/test/cctest/compiler/test-run-machops.cc
 /trunk/test/cctest/compiler/test-simplified-lowering.cc
 /trunk/test/cctest/test-api.cc
 /trunk/test/cctest/test-assembler-arm.cc
 /trunk/test/cctest/test-disasm-arm.cc
 /trunk/test/mjsunit/mjsunit.status
 /trunk/test/mozilla/mozilla.status
 /trunk/test/unittests/compiler/change-lowering-unittest.cc
 /trunk/test/unittests/compiler/instruction-selector-unittest.cc
 /trunk/test/unittests/compiler/instruction-selector-unittest.h
 /trunk/test/unittests/compiler/js-builtin-reducer-unittest.cc
 /trunk/test/unittests/compiler/machine-operator-reducer-unittest.cc
 /trunk/test/unittests/compiler/node-test-utils.cc
 /trunk/test/unittests/compiler/node-test-utils.h
 /trunk/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
 /trunk/test/webkit/webkit.status
 /trunk/tools/gyp/v8.gyp
 /trunk/tools/push-to-trunk/releases.py

=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/math-ceil.js        Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+  "use asm";
+
+  var ceil = stdlib.Math.ceil;
+
+  // f: double -> float
+  function f(a) {
+    a = +a;
+    return ceil(a);
+  }
+
+  return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0,                   f(0));
+assertEquals(+0,                  f(+0));
+assertEquals(-0,                  f(-0));
+assertEquals(1,                   f(0.49999));
+assertEquals(1,                   f(0.6));
+assertEquals(1,                   f(0.5));
+assertEquals(-0,                  f(-0.1));
+assertEquals(-0,                  f(-0.5));
+assertEquals(-0,                  f(-0.6));
+assertEquals(-1,                  f(-1.6));
+assertEquals(-0,                  f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/math-floor.js       Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+  "use asm";
+
+  var floor = stdlib.Math.floor;
+
+  // f: double -> float
+  function f(a) {
+    a = +a;
+    return floor(a);
+  }
+
+  return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0,                   f(0));
+assertEquals(+0,                  f(+0));
+assertEquals(-0,                  f(-0));
+assertEquals(0,                   f(0.49999));
+assertEquals(+0,                  f(0.6));
+assertEquals(+0,                  f(0.5));
+assertEquals(-1,                  f(-0.1));
+assertEquals(-1,                  f(-0.5));
+assertEquals(-1,                  f(-0.6));
+assertEquals(-2,                  f(-1.6));
+assertEquals(-1,                  f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/uint32-less-than-shift.js Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, foreign, heap) {
+  'use asm';
+
+  function foo1(i1) {
+    i1 = i1 | 0;
+    var i10 = i1 >> 5;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+
+  function foo2(i1) {
+    i1 = i1 | 0;
+    var i10 = i1 / 32 | 0;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+
+  function foo3(i1) {
+    i1 = i1 | 0;
+    var i10 = (i1 + 32 | 0) / 32 | 0;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+  return {foo1: foo1, foo2: foo2, foo3: foo3};
+}
+
+var m = Module(this, {}, undefined);
+
+for (var i = 0; i < 4 * 32; i++) {
+  assertEquals(1, m.foo1(i));
+  assertEquals(1, m.foo2(i));
+  assertEquals(1, m.foo3(i));
+}
+
+for (var i = 4 * 32; i < 5 * 32; i++) {
+  assertEquals(1, m.foo1(i));
+  assertEquals(1, m.foo2(i));
+  assertEquals(0, m.foo3(i));
+}
+
+for (var i = 5 * 32; i < 10 * 32; i++) {
+  assertEquals(0, m.foo1(i));
+  assertEquals(0, m.foo2(i));
+  assertEquals(0, m.foo3(i));
+}
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/debug-step-turbofan.js  Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-filter=g --allow-natives-syntax
+
+// Test that Debug::PrepareForBreakPoints can deal with turbofan code (g)
+// on the stack.  Without deoptimization support, we will not be able to
+// replace optimized code for g by unoptimized code with debug break slots.
+// This would cause stepping to fail (V8 issue 3660).
+
+function f(x) {
+  g(x);
+  var a = 0;              // Break 6
+  return a;               // Break 7
+}                         // Break 8
+
+function g(x) {
+  if (x) h();
+  var a = 0;              // Break 2
+  var b = 1;              // Break 3
+  return a + b;           // Break 4
+}                         // Break 5
+
+function h() {
+  debugger;               // Break 0
+}                         // Break 1
+
+Debug = debug.Debug;
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.Break) return;
+  try {
+    exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+    print(exec_state.frame(0).sourceLineText());
+    var match = exec_state.frame(0).sourceLineText().match(/Break (\d)/);
+    assertNotNull(match);
+    assertEquals(break_count++, parseInt(match[1]));
+  } catch (e) {
+    print(e + e.stack);
+    exception = e;
+  }
+}
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(g);
+
+Debug.setListener(listener);
+
+f(1);
+
+Debug.setListener(null);  // Break 9
+assertNull(exception);
+assertEquals(10, break_count);
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/regress-crbug-410033.js Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+%GetScript('v8/gc');
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/serialize-embedded-error.js Fri Oct 31 13:19:44 2014 UTC
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// --serialize-toplevel --cache=code
+
+var caught = false;
+try {
+  parseInt() = 0;
+} catch(e) {
+  caught = true;
+}
+assertTrue(caught);
=======================================
--- /trunk/src/compiler/node-matchers.cc        Wed Oct  1 00:05:35 2014 UTC
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/node-matchers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
-
-
-ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0) {
-  if (opcode() != IrOpcode::kInt32Mul) return;
-  // TODO(dcarney): should test 64 bit ints as well.
-  Int32BinopMatcher m(this->node());
-  if (!m.right().HasValue()) return;
-  int32_t value = m.right().Value();
-  switch (value) {
-    case 8:
-      power_++;  // Fall through.
-    case 4:
-      power_++;  // Fall through.
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  left_ = m.left().node();
-}
-
-
-IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
-    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
-  if (opcode() == IrOpcode::kInt32Add) {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      displacement_ = m.right().Value();
-      index_node_ = m.left().node();
-    }
-  }
-  // Test scale factor.
-  ScaleFactorMatcher scale_matcher(index_node_);
-  if (scale_matcher.Matches()) {
-    index_node_ = scale_matcher.Left();
-    power_ = scale_matcher.Power();
-  }
-}
-
-
-const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
-
-
-LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
-  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
-    return;
-  }
-  int64_t value;
-  Node* left = NULL;
-  {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      value = m.right().Value();
-      left = m.left().node();
-    } else {
-      Int64BinopMatcher m(this->node());
-      if (m.right().HasValue()) {
-        value = m.right().Value();
-        left = m.left().node();
-      } else {
-        return;
-      }
-    }
-  }
-  switch (value) {
-    case 9:
-    case 8:
-      power_++;  // Fall through.
-    case 5:
-    case 4:
-      power_++;  // Fall through.
-    case 3:
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  if (!base::bits::IsPowerOfTwo64(value)) {
-    displacement_ = 1;
-  }
-  left_ = left;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
=======================================
--- /trunk/BUILD.gn     Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/BUILD.gn     Fri Oct 31 13:19:44 2014 UTC
@@ -550,7 +550,6 @@
     "src/compiler/node-aux-data.h",
     "src/compiler/node-cache.cc",
     "src/compiler/node-cache.h",
-    "src/compiler/node-matchers.cc",
     "src/compiler/node-matchers.h",
     "src/compiler/node-properties-inl.h",
     "src/compiler/node-properties.h",
=======================================
--- /trunk/ChangeLog    Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/ChangeLog    Fri Oct 31 13:19:44 2014 UTC
@@ -1,3 +1,11 @@
+2014-10-31: Version 3.30.23
+
+ Introduce v8::Exception::GetMessage to find location of an error object
+        (Chromium issue 427954).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-10-30: Version 3.30.22

MIPS: Classes: Add super support in methods and accessors (issue 3330).
=======================================
--- /trunk/Makefile     Wed Oct  8 00:05:11 2014 UTC
+++ /trunk/Makefile     Fri Oct 31 13:19:44 2014 UTC
@@ -257,7 +257,7 @@
 ENVFILE = $(OUTDIR)/environment

 .PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
-        qc quickcheck $(QUICKCHECKS) \
+        qc quickcheck $(QUICKCHECKS) turbocheck \
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
         $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
@@ -386,6 +386,15 @@
            --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
 qc: quickcheck

+turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
+       tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+           --arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) \
+           --quickcheck --variants=turbofan --download-data mozilla webkit
+       tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+           --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) \
+           --quickcheck --variants=turbofan
+tc: turbocheck
+
# Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
        rm -f $(OUTDIR)/Makefile.$(basename $@)*
=======================================
--- /trunk/include/v8.h Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/include/v8.h Fri Oct 31 13:19:44 2014 UTC
@@ -4164,6 +4164,9 @@
   static Local<Value> TypeError(Handle<String> message);
   static Local<Value> Error(Handle<String> message);

+  static Local<Message> GetMessage(Handle<Value> exception);
+
+  // DEPRECATED. Use GetMessage()->GetStackTrace()
   static Local<StackTrace> GetStackTrace(Handle<Value> exception);
 };

@@ -4224,6 +4227,8 @@
   V8_INLINE Handle<Promise> GetPromise() const { return promise_; }
   V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
   V8_INLINE Handle<Value> GetValue() const { return value_; }
+
+  // DEPRECATED. Use v8::Exception::GetMessage(GetValue())->GetStackTrace()
V8_INLINE Handle<StackTrace> GetStackTrace() const { return stack_trace_; }

  private:
@@ -5505,8 +5510,16 @@
    * all TryCatch blocks should be stack allocated because the memory
    * location itself is compared against JavaScript try/catch blocks.
    */
+  // TODO(dcarney): deprecate.
   TryCatch();

+  /**
+   * Creates a new try/catch block and registers it with v8.  Note that
+   * all TryCatch blocks should be stack allocated because the memory
+   * location itself is compared against JavaScript try/catch blocks.
+   */
+  TryCatch(Isolate* isolate);
+
   /**
    * Unregisters and deletes this try/catch block.
    */
=======================================
--- /trunk/src/api.cc   Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/api.cc   Fri Oct 31 13:19:44 2014 UTC
@@ -730,11 +730,11 @@
// about this there is no HandleScope in this method. When you add one to the // site calling this method you should check that you ensured the VM was not
 // dead first.
-void NeanderArray::add(i::Handle<i::Object> value) {
+void NeanderArray::add(i::Isolate* isolate, i::Handle<i::Object> value) {
   int length = this->length();
   int size = obj_.size();
   if (length == size - 1) {
-    i::Factory* factory = i::Isolate::Current()->factory();
+    i::Factory* factory = isolate->factory();
     i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
     for (int i = 0; i < length; i++)
       new_elms->set(i + 1, get(i));
@@ -769,12 +769,12 @@
     Utils::OpenHandle(templ)->set_property_list(*list);
   }
   NeanderArray array(list);
-  array.add(isolate->factory()->NewNumberFromInt(length));
+  array.add(isolate, isolate->factory()->NewNumberFromInt(length));
   for (int i = 0; i < length; i++) {
     i::Handle<i::Object> value = data[i].IsEmpty() ?
         i::Handle<i::Object>(isolate->factory()->undefined_value()) :
         Utils::OpenHandle(*data[i]);
-    array.add(value);
+    array.add(isolate, value);
   }
 }

@@ -782,7 +782,7 @@
 void Template::Set(v8::Handle<Name> name,
                    v8::Handle<Data> value,
                    v8::PropertyAttribute attribute) {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   const int kSize = 3;
@@ -1064,11 +1064,9 @@


 int TypeSwitch::match(v8::Handle<Value> value) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "TypeSwitch::match");
-  USE(isolate);
+  i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
+  LOG_API(info->GetIsolate(), "TypeSwitch::match");
   i::Handle<i::Object> obj = Utils::OpenHandle(*value);
-  i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
   i::FixedArray* types = i::FixedArray::cast(info->types());
   for (int i = 0; i < types->length(); i++) {
     if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
@@ -1272,7 +1270,7 @@
     info->set_property_accessors(*list);
   }
   NeanderArray array(list);
-  array.add(obj);
+  array.add(isolate, obj);
 }


@@ -1898,10 +1896,26 @@
           v8::internal::GetCurrentStackPosition()));
   isolate_->RegisterTryCatchHandler(this);
 }
+
+
+v8::TryCatch::TryCatch(v8::Isolate* isolate)
+    : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+      next_(isolate_->try_catch_handler()),
+      is_verbose_(false),
+      can_continue_(true),
+      capture_message_(true),
+      rethrow_(false),
+      has_terminated_(false) {
+  ResetInternal();
+  // Special handling for simulators which have a separate JS stack.
+  js_stack_comparable_address_ =
+ reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
+          v8::internal::GetCurrentStackPosition()));
+  isolate_->RegisterTryCatchHandler(this);
+}


 v8::TryCatch::~TryCatch() {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (rethrow_) {
     v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
     v8::HandleScope scope(isolate);
@@ -1954,7 +1968,6 @@


 v8::Local<Value> v8::TryCatch::Exception() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     // Check for out of memory exception.
     i::Object* exception = reinterpret_cast<i::Object*>(exception_);
@@ -1966,7 +1979,6 @@


 v8::Local<Value> v8::TryCatch::StackTrace() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
@@ -1990,7 +2002,6 @@


 v8::Local<v8::Message> v8::TryCatch::Message() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
   DCHECK(message->IsJSMessageObject() || message->IsTheHole());
   if (HasCaught() && !message->IsTheHole()) {
@@ -2002,7 +2013,6 @@


 void v8::TryCatch::Reset() {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (!rethrow_ && HasCaught() && isolate_->has_scheduled_exception()) {
// If an exception was caught but is still scheduled because no API call // promoted it, then it is canceled to prevent it from being propagated.
@@ -2090,11 +2100,8 @@


 MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
-    const char* name,
-    i::Handle<i::Object> recv,
-    int argc,
+ i::Isolate* isolate, const char* name, i::Handle<i::Object> recv, int argc,
     i::Handle<i::Object> argv[]) {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Object> object_fun =
       i::Object::GetProperty(
           isolate, isolate->js_builtins_object(), name).ToHandleChecked();
@@ -2104,13 +2111,10 @@


 MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
-    const char* name,
-    i::Handle<i::Object> data) {
+    i::Isolate* isolate, const char* name, i::Handle<i::Object> data) {
   i::Handle<i::Object> argv[] = { data };
-  return CallV8HeapFunction(name,
-                            i::Isolate::Current()->js_builtins_object(),
-                            arraysize(argv),
-                            argv);
+  return CallV8HeapFunction(isolate, name, isolate->js_builtins_object(),
+                            arraysize(argv), argv);
 }


@@ -2122,8 +2126,9 @@

   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetLineNumber", Utils::OpenHandle(this)).ToHandle(&result);
+  has_pending_exception =
+ !CallV8HeapFunction(isolate, "GetLineNumber", Utils::OpenHandle(this))
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   return static_cast<int>(result->Number());
 }
@@ -2157,8 +2162,9 @@
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> start_col_obj;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
+           .ToHandle(&start_col_obj);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   return static_cast<int>(start_col_obj->Number());
 }
@@ -2172,8 +2178,9 @@
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> start_col_obj;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
+           .ToHandle(&start_col_obj);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   i::Handle<i::JSMessageObject> message =
       i::Handle<i::JSMessageObject>::cast(data_obj);
@@ -2203,8 +2210,9 @@
   EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetSourceLine", Utils::OpenHandle(this)).ToHandle(&result);
+  has_pending_exception =
+ !CallV8HeapFunction(isolate, "GetSourceLine", Utils::OpenHandle(this))
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
   if (result->IsString()) {
return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
@@ -3021,8 +3029,9 @@
   i::Handle<i::Object> args[] = { other };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "EQUALS", obj, arraysize(args), args).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "EQUALS", obj, arraysize(args), args)
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -3287,11 +3296,10 @@
   i::Handle<i::Object> args[] = { obj, key_name };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "ObjectGetOwnPropertyDescriptor",
-      isolate->factory()->undefined_value(),
-      arraysize(args),
-      args).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "ObjectGetOwnPropertyDescriptor",
+                          isolate->factory()->undefined_value(),
+                          arraysize(args), args).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(result);
 }
@@ -5267,25 +5275,25 @@


 void v8::Context::SetSecurityToken(Handle<Value> token) {
-  i::Isolate* isolate = i::Isolate::Current();
-  ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
+  ENTER_V8(isolate);
   i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
   env->set_security_token(*token_handle);
 }


 void v8::Context::UseDefaultSecurityToken() {
-  i::Isolate* isolate = i::Isolate::Current();
-  ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
+  ENTER_V8(isolate);
   env->set_security_token(env->global_object());
 }


 Handle<Value> v8::Context::GetSecurityToken() {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
   i::Object* security_token = env->security_token();
   i::Handle<i::Object> token_handle(security_token, isolate);
   return Utils::ToLocal(token_handle);
@@ -5344,40 +5352,42 @@


 Local<v8::Object> ObjectTemplate::NewInstance() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::ObjectTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
   ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
              return Local<v8::Object>());
   LOG_API(isolate, "ObjectTemplate::NewInstance");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj;
-  has_pending_exception = !i::Execution::InstantiateObject(
-      Utils::OpenHandle(this)).ToHandle(&obj);
+ has_pending_exception = !i::Execution::InstantiateObject(info).ToHandle(&obj);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
   return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
 }


 Local<v8::Function> FunctionTemplate::GetFunction() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
   ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
              return Local<v8::Function>());
   LOG_API(isolate, "FunctionTemplate::GetFunction");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj;
-  has_pending_exception = !i::Execution::InstantiateFunction(
-      Utils::OpenHandle(this)).ToHandle(&obj);
+  has_pending_exception =
+      !i::Execution::InstantiateFunction(info).ToHandle(&obj);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
   return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
 }


 bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
-             return false);
+  i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
+ ON_BAILOUT(isolate, "v8::FunctionTemplate::HasInstanceOf()", return false);
   i::Object* obj = *Utils::OpenHandle(*value);
-  return Utils::OpenHandle(this)->IsTemplateFor(obj);
+  return info->IsTemplateFor(obj);
 }


@@ -6807,7 +6817,7 @@
   obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
   obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
                             : *Utils::OpenHandle(*data));
-  listeners.add(obj.value());
+  listeners.add(isolate, obj.value());
   return true;
 }

@@ -6961,6 +6971,17 @@
 #undef DEFINE_ERROR


+Local<Message> Exception::GetMessage(Handle<Value> exception) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
+  if (!obj->IsHeapObject()) return Local<Message>();
+  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  return Utils::MessageToLocal(
+      scope.CloseAndEscape(isolate->CreateMessage(obj, NULL)));
+}
+
+
 Local<StackTrace> Exception::GetStackTrace(Handle<Value> exception) {
   i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
   if (!obj->IsJSObject()) return Local<StackTrace>();
=======================================
--- /trunk/src/api.h    Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/api.h    Fri Oct 31 13:19:44 2014 UTC
@@ -54,7 +54,8 @@
     return obj_.value();
   }

-  void add(v8::internal::Handle<v8::internal::Object> value);
+  void add(internal::Isolate* isolate,
+           v8::internal::Handle<v8::internal::Object> value);

   int length();

=======================================
--- /trunk/src/arm/assembler-arm.cc     Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/arm/assembler-arm.cc     Fri Oct 31 13:19:44 2014 UTC
@@ -3092,6 +3092,76 @@
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
        m*B5 | vm);
 }
+
+
+void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
+       0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
+                       const Condition cond) {
+  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
+       0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
+}


 // Support for NEON.
=======================================
--- /trunk/src/arm/assembler-arm.h      Wed Oct 15 00:05:09 2014 UTC
+++ /trunk/src/arm/assembler-arm.h      Fri Oct 31 13:19:44 2014 UTC
@@ -1279,6 +1279,14 @@
              const DwVfpRegister src,
              const Condition cond = al);

+  // ARMv8 rounding instructions.
+  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
+              const Condition cond = al);
+
   // Support for NEON.
   // All these APIs support D0 to D31 and Q0 to Q15.

=======================================
--- /trunk/src/arm/constants-arm.h      Sun Aug 24 11:34:17 2014 UTC
+++ /trunk/src/arm/constants-arm.h      Fri Oct 31 13:19:44 2014 UTC
@@ -161,26 +161,26 @@

 // Instruction encoding bits and masks.
 enum {
-  H   = 1 << 5,   // Halfword (or byte).
-  S6  = 1 << 6,   // Signed (or unsigned).
-  L   = 1 << 20,  // Load (or store).
-  S   = 1 << 20,  // Set condition code (or leave unchanged).
-  W   = 1 << 21,  // Writeback base register (or leave unchanged).
-  A   = 1 << 21,  // Accumulate in multiply instruction (or not).
-  B   = 1 << 22,  // Unsigned byte (or word).
-  N   = 1 << 22,  // Long (or short).
-  U   = 1 << 23,  // Positive (or negative) offset/index.
- P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
-  I   = 1 << 25,  // Immediate shifter operand (or not).
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
+  H = 1 << 5,   // Halfword (or byte).
+  S6 = 1 << 6,  // Signed (or unsigned).
+  L = 1 << 20,  // Load (or store).
+  S = 1 << 20,  // Set condition code (or leave unchanged).
+  W = 1 << 21,  // Writeback base register (or leave unchanged).
+  A = 1 << 21,  // Accumulate in multiply instruction (or not).
+  B = 1 << 22,  // Unsigned byte (or word).
+  N = 1 << 22,  // Long (or short).
+  U = 1 << 23,  // Positive (or negative) offset/index.
+ P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
+  I = 1 << 25,  // Immediate shifter operand (or not).
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B6 = 1 << 6,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
   B12 = 1 << 12,
   B16 = 1 << 16,
+  B17 = 1 << 17,
   B18 = 1 << 18,
   B19 = 1 << 19,
   B20 = 1 << 20,
@@ -194,16 +194,16 @@
   B28 = 1 << 28,

   // Instruction bit masks.
-  kCondMask   = 15 << 28,
-  kALUMask    = 0x6f << 21,
-  kRdMask     = 15 << 12,  // In str instruction.
+  kCondMask = 15 << 28,
+  kALUMask = 0x6f << 21,
+  kRdMask = 15 << 12,  // In str instruction.
   kCoprocessorMask = 15 << 8,
   kOpCodeMask = 15 << 21,  // In data-processing instructions.
-  kImm24Mask  = (1 << 24) - 1,
-  kImm16Mask  = (1 << 16) - 1,
-  kImm8Mask  = (1 << 8) - 1,
-  kOff12Mask  = (1 << 12) - 1,
-  kOff8Mask  = (1 << 8) - 1
+  kImm24Mask = (1 << 24) - 1,
+  kImm16Mask = (1 << 16) - 1,
+  kImm8Mask = (1 << 8) - 1,
+  kOff12Mask = (1 << 12) - 1,
+  kOff8Mask = (1 << 8) - 1
 };


=======================================
--- /trunk/src/arm/disasm-arm.cc        Fri Oct 17 20:38:26 2014 UTC
+++ /trunk/src/arm/disasm-arm.cc        Fri Oct 31 13:19:44 2014 UTC
@@ -1277,6 +1277,14 @@
         } else {
           Unknown(instr);  // Not used by V8.
         }
+ } else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
+        bool dp_operation = (instr->SzValue() == 1);
+        // vrintz - round towards zero (truncate)
+        if (dp_operation) {
+          Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
+        } else {
+          Unknown(instr);  // Not used by V8.
+        }
       } else {
         Unknown(instr);  // Not used by V8.
       }
@@ -1623,6 +1631,50 @@
           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "pld [r%d, #+%d]", Rn, offset);
         }
+      } else {
+        Unknown(instr);
+      }
+      break;
+    case 0x1D:
+      if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
+          instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
+          instr->Bit(4) == 0x0) {
+        // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+        bool dp_operation = (instr->SzValue() == 1);
+        int rounding_mode = instr->Bits(17, 16);
+        switch (rounding_mode) {
+          case 0x0:
+            if (dp_operation) {
+              Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x1:
+            if (dp_operation) {
+              Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x2:
+            if (dp_operation) {
+              Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x3:
+            if (dp_operation) {
+              Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
       } else {
         Unknown(instr);
       }
=======================================
--- /trunk/src/arm/simulator-arm.cc     Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/arm/simulator-arm.cc     Fri Oct 31 13:19:44 2014 UTC
@@ -2957,6 +2957,12 @@
         } else {
           UNREACHABLE();  // Not used by v8.
         }
+ } else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
+        // vrintz - truncate
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = std::trunc(dm_value);
+        dd_value = canonicalizeNaN(dd_value);
+        set_d_register_from_double(vd, dd_value);
       } else {
         UNREACHABLE();  // Not used by V8.
       }
@@ -3606,6 +3612,50 @@
       } else {
         UNIMPLEMENTED();
       }
+      break;
+    case 0x1D:
+      if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
+          instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
+          instr->Bit(8) == 0x1) {
+        int vm = instr->VFPMRegValue(kDoublePrecision);
+        int vd = instr->VFPDRegValue(kDoublePrecision);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = 0.0;
+        int rounding_mode = instr->Bits(17, 16);
+        switch (rounding_mode) {
+          case 0x0:  // vrinta - round with ties away from zero
+            dd_value = std::round(dm_value);
+            break;
+          case 0x1: {  // vrintn - round with ties to even
+            dd_value = std::floor(dm_value);
+            double error = dm_value - dd_value;
+ // Take care of correctly handling the range [-0.5, -0.0], which
+            // must yield -0.0.
+            if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
+              dd_value = -0.0;
+ // If the error is greater than 0.5, or is equal to 0.5 and the
+              // integer result is odd, round up.
+            } else if ((error > 0.5) ||
+                       ((error == 0.5) && (fmod(dd_value, 2) != 0))) {
+              dd_value++;
+            }
+            break;
+          }
+          case 0x2:  // vrintp - ceil
+            dd_value = std::ceil(dm_value);
+            break;
+          case 0x3:  // vrintm - floor
+            dd_value = std::floor(dm_value);
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
+        dd_value = canonicalizeNaN(dd_value);
+        set_d_register_from_double(vd, dd_value);
+      } else {
+        UNIMPLEMENTED();
+      }
       break;
     default:
       UNIMPLEMENTED();
=======================================
--- /trunk/src/ast-value-factory.cc     Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/ast-value-factory.cc     Fri Oct 31 13:19:44 2014 UTC
@@ -159,9 +159,6 @@
       return DoubleToBoolean(number_);
     case SMI:
       return smi_ != 0;
-    case STRING_ARRAY:
-      UNREACHABLE();
-      break;
     case BOOLEAN:
       return bool_;
     case NULL_TYPE:
@@ -202,22 +199,6 @@
         value_ = isolate->factory()->false_value();
       }
       break;
-    case STRING_ARRAY: {
-      DCHECK(strings_ != NULL);
-      Factory* factory = isolate->factory();
-      int len = strings_->length();
-      Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED);
-      for (int i = 0; i < len; i++) {
-        const AstRawString* string = (*strings_)[i];
-        Handle<Object> element = string->string();
-        // Strings are already internalized.
-        DCHECK(!element.is_null());
-        elements->set(i, *element);
-      }
-      value_ =
- factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
-      break;
-    }
     case NULL_TYPE:
       value_ = isolate->factory()->null_value();
       break;
@@ -347,17 +328,6 @@
   } else {
     GENERATE_VALUE_GETTER(false_value_, false);
   }
-}
-
-
-const AstValue* AstValueFactory::NewStringList(
-    ZoneList<const AstRawString*>* strings) {
-  AstValue* value = new (zone_) AstValue(strings);
-  if (isolate_) {
-    value->Internalize(isolate_);
-  }
-  values_.Add(value);
-  return value;
 }


=======================================
--- /trunk/src/ast-value-factory.h      Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/ast-value-factory.h      Fri Oct 31 13:19:44 2014 UTC
@@ -194,7 +194,6 @@
     NUMBER,
     SMI,
     BOOLEAN,
-    STRING_ARRAY,
     NULL_TYPE,
     UNDEFINED,
     THE_HOLE
@@ -212,10 +211,6 @@
   }

   explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; }
-
- explicit AstValue(ZoneList<const AstRawString*>* s) : type_(STRING_ARRAY) {
-    strings_ = s;
-  }

   explicit AstValue(Type t) : type_(t) {
     DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
@@ -239,33 +234,33 @@


 // For generating constants.
-#define STRING_CONSTANTS(F)                           \
-  F(anonymous_function, "(anonymous function)")       \
-  F(arguments, "arguments")                           \
-  F(constructor, "constructor")                       \
-  F(done, "done")                                     \
-  F(dot, ".")                                         \
-  F(dot_for, ".for")                                  \
-  F(dot_generator, ".generator")                      \
-  F(dot_generator_object, ".generator_object")        \
-  F(dot_iterator, ".iterator")                        \
-  F(dot_module, ".module")                            \
-  F(dot_result, ".result")                            \
-  F(empty, "")                                        \
-  F(eval, "eval")                                     \
-  F(initialize_const_global, "initializeConstGlobal") \
-  F(initialize_var_global, "initializeVarGlobal")     \
-  F(make_reference_error, "MakeReferenceError")       \
-  F(make_syntax_error, "MakeSyntaxError")             \
-  F(make_type_error, "MakeTypeError")                 \
-  F(module, "module")                                 \
-  F(native, "native")                                 \
-  F(next, "next")                                     \
-  F(proto, "__proto__")                               \
-  F(prototype, "prototype")                           \
-  F(this, "this")                                     \
-  F(use_asm, "use asm")                               \
-  F(use_strict, "use strict")                         \
+#define STRING_CONSTANTS(F)                             \
+  F(anonymous_function, "(anonymous function)")         \
+  F(arguments, "arguments")                             \
+  F(constructor, "constructor")                         \
+  F(done, "done")                                       \
+  F(dot, ".")                                           \
+  F(dot_for, ".for")                                    \
+  F(dot_generator, ".generator")                        \
+  F(dot_generator_object, ".generator_object")          \
+  F(dot_iterator, ".iterator")                          \
+  F(dot_module, ".module")                              \
+  F(dot_result, ".result")                              \
+  F(empty, "")                                          \
+  F(eval, "eval")                                       \
+  F(initialize_const_global, "initializeConstGlobal")   \
+  F(initialize_var_global, "initializeVarGlobal")       \
+  F(make_reference_error, "MakeReferenceErrorEmbedded") \
+  F(make_syntax_error, "MakeSyntaxErrorEmbedded")       \
+  F(make_type_error, "MakeTypeErrorEmbedded")           \
+  F(module, "module")                                   \
+  F(native, "native")                                   \
+  F(next, "next")                                       \
+  F(proto, "__proto__")                                 \
+  F(prototype, "prototype")                             \
+  F(this, "this")                                       \
+  F(use_asm, "use asm")                                 \
+  F(use_strict, "use strict")                           \
   F(value, "value")

 #define OTHER_CONSTANTS(F) \
=======================================
--- /trunk/src/ast.h    Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/ast.h    Fri Oct 31 13:19:44 2014 UTC
@@ -3414,13 +3414,6 @@
         new (zone_) Literal(zone_, ast_value_factory_->NewBoolean(b), pos);
     VISIT_AND_RETURN(Literal, lit)
   }
-
-  Literal* NewStringListLiteral(ZoneList<const AstRawString*>* strings,
-                                int pos) {
-    Literal* lit = new (zone_)
-        Literal(zone_, ast_value_factory_->NewStringList(strings), pos);
-    VISIT_AND_RETURN(Literal, lit)
-  }

   Literal* NewNullLiteral(int pos) {
     Literal* lit =
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Fri Oct 31 13:19:44 2014 UTC
@@ -363,6 +363,18 @@
     case kArmVsqrtF64:
       __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
+    case kArmVfloorF64:
+      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVceilF64:
+      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTruncateF64:
+      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTiesAwayF64:
+      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
     case kArmVnegF64:
       __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
=======================================
--- /trunk/src/compiler/arm/instruction-codes-arm.h Wed Oct 15 13:35:30 2014 UTC
+++ /trunk/src/compiler/arm/instruction-codes-arm.h Fri Oct 31 13:19:44 2014 UTC
@@ -44,6 +44,10 @@
   V(ArmVmodF64)                    \
   V(ArmVnegF64)                    \
   V(ArmVsqrtF64)                   \
+  V(ArmVfloorF64)                  \
+  V(ArmVceilF64)                   \
+  V(ArmVroundTruncateF64)          \
+  V(ArmVroundTiesAwayF64)          \
   V(ArmVcvtF32F64)                 \
   V(ArmVcvtF64F32)                 \
   V(ArmVcvtF64S32)                 \
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Fri Oct 31 13:19:44 2014 UTC
@@ -100,6 +100,10 @@
       case kArmVmodF64:
       case kArmVnegF64:
       case kArmVsqrtF64:
+      case kArmVfloorF64:
+      case kArmVceilF64:
+      case kArmVroundTruncateF64:
+      case kArmVroundTiesAwayF64:
       case kArmVcvtF32F64:
       case kArmVcvtF64F32:
       case kArmVcvtF64S32:
@@ -113,6 +117,14 @@
     return false;
   }
 };
+
+
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
@@ -824,6 +836,30 @@
   ArmOperandGenerator g(this);
Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVfloorF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVceilF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTruncateF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+}


 void InstructionSelector::VisitCall(Node* node) {
@@ -1139,10 +1175,19 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kInt32DivIsSafe |
-         MachineOperatorBuilder::kInt32ModIsSafe |
-         MachineOperatorBuilder::kUint32DivIsSafe |
-         MachineOperatorBuilder::kUint32ModIsSafe;
+  MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kInt32DivIsSafe |
+      MachineOperatorBuilder::kInt32ModIsSafe |
+      MachineOperatorBuilder::kUint32DivIsSafe |
+      MachineOperatorBuilder::kUint32ModIsSafe;
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    flags |= MachineOperatorBuilder::kFloat64Floor |
+             MachineOperatorBuilder::kFloat64Ceil |
+             MachineOperatorBuilder::kFloat64RoundTruncate |
+             MachineOperatorBuilder::kFloat64RoundTiesAway;
+  }
+  return flags;
 }

 }  // namespace compiler
=======================================
--- /trunk/src/compiler/arm/linkage-arm.cc      Tue Sep  9 00:05:04 2014 UTC
+++ /trunk/src/compiler/arm/linkage-arm.cc      Fri Oct 31 13:19:44 2014 UTC
@@ -49,7 +49,7 @@


 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
=======================================
--- /trunk/src/compiler/arm64/code-generator-arm64.cc Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/arm64/code-generator-arm64.cc Fri Oct 31 13:19:44 2014 UTC
@@ -222,6 +222,18 @@
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float64Ceil:
+      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64Floor:
+      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTruncate:
+      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTiesAway:
+      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64Add:
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
=======================================
--- /trunk/src/compiler/arm64/instruction-codes-arm64.h Wed Oct 15 13:35:30 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-codes-arm64.h Fri Oct 31 13:19:44 2014 UTC
@@ -78,6 +78,10 @@
   V(Arm64Float64Div)               \
   V(Arm64Float64Mod)               \
   V(Arm64Float64Sqrt)              \
+  V(Arm64Float64Floor)             \
+  V(Arm64Float64Ceil)              \
+  V(Arm64Float64RoundTruncate)     \
+  V(Arm64Float64RoundTiesAway)     \
   V(Arm64Float32ToFloat64)         \
   V(Arm64Float64ToFloat32)         \
   V(Arm64Float64ToInt32)           \
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Fri Oct 31 13:19:44 2014 UTC
@@ -84,6 +84,14 @@
            Assembler::IsImmLSUnscaled(value);
   }
 };
+
+
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}


 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
@@ -899,9 +907,27 @@


 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRRFloat64(this, kArm64Float64Sqrt, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
 }


@@ -1317,9 +1343,11 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kNoFlags;
+  return MachineOperatorBuilder::kFloat64Floor |
+         MachineOperatorBuilder::kFloat64Ceil |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway;
 }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
=======================================
--- /trunk/src/compiler/arm64/linkage-arm64.cc  Tue Sep  9 00:05:04 2014 UTC
+++ /trunk/src/compiler/arm64/linkage-arm64.cc  Fri Oct 31 13:19:44 2014 UTC
@@ -49,7 +49,7 @@


 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
=======================================
--- /trunk/src/compiler/change-lowering.cc      Thu Oct 23 08:44:45 2014 UTC
+++ /trunk/src/compiler/change-lowering.cc      Fri Oct 31 13:19:44 2014 UTC
@@ -46,14 +46,14 @@
   STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
   const int heap_number_value_offset =
       ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
-  return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
+  return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
 }


 Node* ChangeLowering::SmiMaxValueConstant() {
const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize() : SmiTagging<8>::SmiValueSize();
-  return jsgraph()->Int32Constant(
+  return jsgraph()->IntPtrConstant(
       -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
 }

@@ -61,7 +61,7 @@
 Node* ChangeLowering::SmiShiftBitsConstant() {
const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize() : SmiTagging<8>::SmiShiftSize();
-  return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
+  return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
 }


@@ -166,7 +166,7 @@
   STATIC_ASSERT(kSmiTagMask == 1);

   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
+                               jsgraph()->IntPtrConstant(kSmiTagMask));
   Node* branch = graph()->NewNode(common()->Branch(), tag, control);

   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -192,7 +192,7 @@
   STATIC_ASSERT(kSmiTagMask == 1);

   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
+                               jsgraph()->IntPtrConstant(kSmiTagMask));
   Node* branch = graph()->NewNode(common()->Branch(), tag, control);

   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
=======================================
--- /trunk/src/compiler/code-generator-impl.h   Wed Oct 15 00:05:09 2014 UTC
+++ /trunk/src/compiler/code-generator-impl.h   Fri Oct 31 13:19:44 2014 UTC
@@ -5,10 +5,12 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
 #define V8_COMPILER_CODE_GENERATOR_IMPL_H_

+#include "src/code-stubs.h"
 #include "src/compiler/code-generator.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/opcodes.h"
+#include "src/macro-assembler.h"

 namespace v8 {
 namespace internal {
=======================================
--- /trunk/src/compiler/code-generator.h        Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/code-generator.h        Fri Oct 31 13:19:44 2014 UTC
@@ -17,6 +17,8 @@
 namespace internal {
 namespace compiler {

+class Linkage;
+
 // Generates native code for a sequence of instructions.
 class CodeGenerator FINAL : public GapResolver::Assembler {
  public:
=======================================
--- /trunk/src/compiler/common-operator.cc      Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/common-operator.cc      Fri Oct 31 13:19:44 2014 UTC
@@ -7,6 +7,8 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
 #include "src/unique.h"
 #include "src/zone.h"

=======================================
--- /trunk/src/compiler/graph-visualizer.cc     Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/graph-visualizer.cc     Fri Oct 31 13:19:44 2014 UTC
@@ -7,6 +7,7 @@
 #include <sstream>
 #include <string>

+#include "src/code-stubs.h"
 #include "src/compiler/generic-algorithm.h"
 #include "src/compiler/generic-node.h"
 #include "src/compiler/generic-node-inl.h"
@@ -673,19 +674,16 @@
   Tag tag(this, "intervals");
   PrintStringProperty("name", phase);

- const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
-  for (int i = 0; i < fixed_d->length(); ++i) {
-    PrintLiveRange(fixed_d->at(i), "fixed");
+  for (auto range : allocator->fixed_double_live_ranges()) {
+    PrintLiveRange(range, "fixed");
   }

-  const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
-  for (int i = 0; i < fixed->length(); ++i) {
-    PrintLiveRange(fixed->at(i), "fixed");
+  for (auto range : allocator->fixed_live_ranges()) {
+    PrintLiveRange(range, "fixed");
   }

-  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
-  for (int i = 0; i < live_ranges->length(); ++i) {
-    PrintLiveRange(live_ranges->at(i), "object");
+  for (auto range : allocator->live_ranges()) {
+    PrintLiveRange(range, "object");
   }
 }

=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Fri Oct 31 13:19:44 2014 UTC
@@ -353,6 +353,24 @@
     case kSSEFloat64Sqrt:
       __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat64Floor: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundDown);
+      break;
+    }
+    case kSSEFloat64Ceil: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundUp);
+      break;
+    }
+    case kSSEFloat64RoundTruncate: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundToZero);
+      break;
+    }
     case kSSECvtss2sd:
       __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
=======================================
--- /trunk/src/compiler/ia32/instruction-codes-ia32.h Wed Oct 15 13:35:30 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-codes-ia32.h Fri Oct 31 13:19:44 2014 UTC
@@ -36,6 +36,9 @@
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
   V(SSEFloat64Sqrt)                \
+  V(SSEFloat64Floor)               \
+  V(SSEFloat64Ceil)                \
+  V(SSEFloat64RoundTruncate)       \
   V(SSECvtss2sd)                   \
   V(SSECvtsd2ss)                   \
   V(SSEFloat64ToInt32)             \
=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Fri Oct 31 13:19:44 2014 UTC
@@ -51,6 +51,178 @@
   DCHECK(0 <= power && power < 4);
   return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
 }
+
+
+// Fairly Intel-specific node matcher used for matching scale factors in
+// addressing modes.
+// Matches nodes of form [x * N] for N in {1,2,4,8}
+class ScaleFactorMatcher : public NodeMatcher {
+ public:
+  static const int kMatchedFactors[4];
+
+  explicit ScaleFactorMatcher(Node* node);
+
+  bool Matches() const { return left_ != NULL; }
+  int Power() const {
+    DCHECK(Matches());
+    return power_;
+  }
+  Node* Left() const {
+    DCHECK(Matches());
+    return left_;
+  }
+
+ private:
+  Node* left_;
+  int power_;
+};
+
+
+// Fairly Intel-specific node matcher used for matching index and displacement
+// operands in addressing modes.
+// Matches nodes of form:
+//  [x * N]
+//  [x * N + K]
+//  [x + K]
+//  [x] -- fallback case
+// for N in {1,2,4,8} and K int32_t
+class IndexAndDisplacementMatcher : public NodeMatcher {
+ public:
+  explicit IndexAndDisplacementMatcher(Node* node);
+
+  Node* index_node() const { return index_node_; }
+  int displacement() const { return displacement_; }
+  int power() const { return power_; }
+
+ private:
+  Node* index_node_;
+  int displacement_;
+  int power_;
+};
+
+
+// Fairly Intel-specific node matcher used for matching multiplies that can be
+// transformed to lea instructions.
+// Matches nodes of form:
+//  [x * N]
+// for N in {1,2,3,4,5,8,9}
+class LeaMultiplyMatcher : public NodeMatcher {
+ public:
+  static const int kMatchedFactors[7];
+
+  explicit LeaMultiplyMatcher(Node* node);
+
+  bool Matches() const { return left_ != NULL; }
+  int Power() const {
+    DCHECK(Matches());
+    return power_;
+  }
+  Node* Left() const {
+    DCHECK(Matches());
+    return left_;
+  }
+  // Displacement will be either 0 or 1.
+  int32_t Displacement() const {
+    DCHECK(Matches());
+    return displacement_;
+  }
+
+ private:
+  Node* left_;
+  int power_;
+  int displacement_;
+};
+
+
+const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
+
+
+ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
+    : NodeMatcher(node), left_(NULL), power_(0) {
+  if (opcode() != IrOpcode::kInt32Mul) return;
+  // TODO(dcarney): should test 64 bit ints as well.
+  Int32BinopMatcher m(this->node());
+  if (!m.right().HasValue()) return;
+  int32_t value = m.right().Value();
+  switch (value) {
+    case 8:
+      power_++;  // Fall through.
+    case 4:
+      power_++;  // Fall through.
+    case 2:
+      power_++;  // Fall through.
+    case 1:
+      break;
+    default:
+      return;
+  }
+  left_ = m.left().node();
+}
+
+
+IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
+    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
+  if (opcode() == IrOpcode::kInt32Add) {
+    Int32BinopMatcher m(this->node());
+    if (m.right().HasValue()) {
+      displacement_ = m.right().Value();
+      index_node_ = m.left().node();
+    }
+  }
+  // Test scale factor.
+  ScaleFactorMatcher scale_matcher(index_node_);
+  if (scale_matcher.Matches()) {
+    index_node_ = scale_matcher.Left();
+    power_ = scale_matcher.Power();
+  }
+}
+
+
+const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
+
+
+LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
+    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
+  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
+    return;
+  }
+  int64_t value;
+  Node* left = NULL;
+  {
+    Int32BinopMatcher m(this->node());
+    if (m.right().HasValue()) {
+      value = m.right().Value();
+      left = m.left().node();
+    } else {
+      Int64BinopMatcher m(this->node());
+      if (m.right().HasValue()) {
+        value = m.right().Value();
+        left = m.left().node();
+      } else {
+        return;
+      }
+    }
+  }
+  switch (value) {
+    case 9:
+    case 8:
+      power_++;  // Fall through.
+    case 5:
+    case 4:
+      power_++;  // Fall through.
+    case 3:
+    case 2:
+      power_++;  // Fall through.
+    case 1:
+      break;
+    default:
+      return;
+  }
+  if (!base::bits::IsPowerOfTwo64(value)) {
+    displacement_ = 1;
+  }
+  left_ = left;
+}


 class AddressingModeMatcher {
@@ -140,6 +312,14 @@
   InstructionOperand* displacement_operand_;
   AddressingMode mode_;
 };
+
+
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}


 void InstructionSelector::VisitLoad(Node* node) {
@@ -595,6 +775,29 @@
   Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }

+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+

 void InstructionSelector::VisitCall(Node* node) {
   IA32OperandGenerator g(this);
@@ -881,9 +1084,13 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kNoFlags;
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate;
+  }
+  return MachineOperatorBuilder::Flag::kNoFlags;
 }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
=======================================
--- /trunk/src/compiler/ia32/linkage-ia32.cc    Tue Sep  9 00:05:04 2014 UTC
+++ /trunk/src/compiler/ia32/linkage-ia32.cc    Fri Oct 31 13:19:44 2014 UTC
@@ -44,7 +44,7 @@


 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
=======================================
--- /trunk/src/compiler/instruction-selector-impl.h Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/compiler/instruction-selector-impl.h Fri Oct 31 13:19:44 2014 UTC
@@ -9,6 +9,7 @@
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/linkage.h"
+#include "src/macro-assembler.h"

 namespace v8 {
 namespace internal {
@@ -45,7 +46,8 @@

   InstructionOperand* DefineAsConstant(Node* node) {
     selector()->MarkAsDefined(node);
-    int virtual_register = sequence()->AddConstant(node, ToConstant(node));
+    int virtual_register = selector_->GetVirtualRegister(node);
+    sequence()->AddConstant(virtual_register, ToConstant(node));
     return ConstantOperand::Create(virtual_register, zone());
   }

@@ -171,8 +173,7 @@
   UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(
-        selector_->sequence()->GetVirtualRegister(node));
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsDefined(node);
     return operand;
   }
@@ -180,8 +181,7 @@
   UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(
-        selector_->sequence()->GetVirtualRegister(node));
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsUsed(node);
     return operand;
   }
=======================================
--- /trunk/src/compiler/instruction-selector.cc Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/instruction-selector.cc Fri Oct 31 13:19:44 2014 UTC
@@ -4,6 +4,7 @@

 #include "src/compiler/instruction-selector.h"

+#include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
@@ -13,7 +14,8 @@
 namespace internal {
 namespace compiler {

-InstructionSelector::InstructionSelector(Zone* local_zone, Linkage* linkage,
+InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
+                                         Linkage* linkage,
                                          InstructionSequence* sequence,
                                          Schedule* schedule,
SourcePositionTable* source_positions,
@@ -24,10 +26,11 @@
       source_positions_(source_positions),
       features_(features),
       schedule_(schedule),
+      node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
       current_block_(NULL),
       instructions_(zone()),
-      defined_(sequence->node_count(), false, zone()),
-      used_(sequence->node_count(), false, zone()) {}
+      defined_(graph->NodeCount(), false, zone()),
+      used_(graph->NodeCount(), false, zone()) {}


 void InstructionSelector::SelectInstructions() {
@@ -155,6 +158,19 @@
   return node->OwnedBy(user) &&
          schedule()->block(node) == schedule()->block(user);
 }
+
+
+int InstructionSelector::GetVirtualRegister(const Node* node) {
+  if (node_map_[node->id()] == kNodeUnmapped) {
+    node_map_[node->id()] = sequence()->NextVirtualRegister();
+  }
+  return node_map_[node->id()];
+}
+
+
+int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
+  return node_map_[node->id()];
+}


 bool InstructionSelector::IsDefined(Node* node) const {
@@ -195,27 +211,31 @@

 bool InstructionSelector::IsDouble(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsDouble(sequence()->GetVirtualRegister(node));
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsDouble(virtual_register);
 }


 void InstructionSelector::MarkAsDouble(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsReference(node));
-  sequence()->MarkAsDouble(sequence()->GetVirtualRegister(node));
+  sequence()->MarkAsDouble(GetVirtualRegister(node));
 }


 bool InstructionSelector::IsReference(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsReference(sequence()->GetVirtualRegister(node));
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsReference(virtual_register);
 }


 void InstructionSelector::MarkAsReference(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsDouble(node));
-  sequence()->MarkAsReference(sequence()->GetVirtualRegister(node));
+  sequence()->MarkAsReference(GetVirtualRegister(node));
 }


@@ -583,6 +603,10 @@
     case IrOpcode::kFloat64Div:
     case IrOpcode::kFloat64Mod:
     case IrOpcode::kFloat64Sqrt:
+    case IrOpcode::kFloat64Floor:
+    case IrOpcode::kFloat64Ceil:
+    case IrOpcode::kFloat64RoundTruncate:
+    case IrOpcode::kFloat64RoundTiesAway:
       return kMachFloat64;
     case IrOpcode::kFloat64Equal:
     case IrOpcode::kFloat64LessThan:
@@ -772,11 +796,20 @@
       return VisitFloat64LessThan(node);
     case IrOpcode::kFloat64LessThanOrEqual:
       return VisitFloat64LessThanOrEqual(node);
+    case IrOpcode::kFloat64Floor:
+      return MarkAsDouble(node), VisitFloat64Floor(node);
+    case IrOpcode::kFloat64Ceil:
+      return MarkAsDouble(node), VisitFloat64Ceil(node);
+    case IrOpcode::kFloat64RoundTruncate:
+      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+    case IrOpcode::kFloat64RoundTiesAway:
+      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
+      break;
   }
 }

@@ -892,14 +925,14 @@
 void InstructionSelector::VisitPhi(Node* node) {
   // TODO(bmeurer): Emit a PhiInstruction here.
   PhiInstruction* phi = new (instruction_zone())
- PhiInstruction(instruction_zone(), sequence()->GetVirtualRegister(node));
+      PhiInstruction(instruction_zone(), GetVirtualRegister(node));
sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
   const int input_count = node->op()->InputCount();
   phi->operands().reserve(static_cast<size_t>(input_count));
   for (int i = 0; i < input_count; ++i) {
     Node* const input = node->InputAt(i);
     MarkAsUsed(input);
-    phi->operands().push_back(sequence()->GetVirtualRegister(input));
+    phi->operands().push_back(GetVirtualRegister(input));
   }
 }

=======================================
--- /trunk/src/compiler/instruction-selector.h  Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/compiler/instruction-selector.h  Fri Oct 31 13:19:44 2014 UTC
@@ -19,13 +19,19 @@
 // Forward declarations.
 struct CallBuffer;  // TODO(bmeurer): Remove this.
 class FlagsContinuation;
+class Linkage;
+
+typedef IntVector NodeToVregMap;

 class InstructionSelector FINAL {
  public:
+  static const int kNodeUnmapped = -1;
+
   // Forward declarations.
   class Features;

-  InstructionSelector(Zone* local_zone, Linkage* linkage,
+  // TODO(dcarney): pass in vreg mapping instead of graph.
+  InstructionSelector(Zone* local_zone, Graph* graph, Linkage* linkage,
                       InstructionSequence* sequence, Schedule* schedule,
                       SourcePositionTable* source_positions,
                       Features features = SupportedFeatures());
@@ -108,6 +114,11 @@

   // Checks if {node} is currently live.
bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
+
+  int GetVirtualRegister(const Node* node);
+  // Gets the current mapping if it exists, kNodeUnmapped otherwise.
+  int GetMappedVirtualRegister(const Node* node) const;
+  const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }

  private:
   friend class OperandGenerator;
@@ -205,6 +216,7 @@
   SourcePositionTable* const source_positions_;
   Features features_;
   Schedule* const schedule_;
+  NodeToVregMap node_map_;
   BasicBlock* current_block_;
   ZoneDeque<Instruction*> instructions_;
   BoolVector defined_;
=======================================
--- /trunk/src/compiler/instruction.cc  Thu Oct 23 08:44:45 2014 UTC
+++ /trunk/src/compiler/instruction.cc  Fri Oct 31 13:19:44 2014 UTC
@@ -2,16 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "src/compiler/instruction.h"
-
 #include "src/compiler/common-operator.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
+#include "src/macro-assembler.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

+STATIC_ASSERT(kMaxGeneralRegisters >= Register::kNumRegisters);
+STATIC_ASSERT(kMaxDoubleRegisters >= DoubleRegister::kMaxNumRegisters);
+
+
 std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
   switch (op.kind()) {
     case InstructionOperand::INVALID:
@@ -383,10 +387,8 @@


 InstructionSequence::InstructionSequence(Zone* instruction_zone,
-                                         const Graph* graph,
                                          const Schedule* schedule)
     : zone_(instruction_zone),
-      node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
instruction_blocks_(static_cast<int>(schedule->rpo_order()->size()), NULL,
                           zone()),
       constants_(ConstantMap::key_compare(),
@@ -400,14 +402,6 @@
       deoptimization_entries_(zone()) {
   InitializeInstructionBlocks(zone(), schedule, &instruction_blocks_);
 }
-
-
-int InstructionSequence::GetVirtualRegister(const Node* node) {
-  if (node_map_[node->id()] == kNodeUnmapped) {
-    node_map_[node->id()] = NextVirtualRegister();
-  }
-  return node_map_[node->id()];
-}


 Label* InstructionSequence::GetLabel(BasicBlock::RpoNumber rpo) {
=======================================
--- /trunk/src/compiler/instruction.h   Fri Oct 24 14:44:48 2014 UTC
+++ /trunk/src/compiler/instruction.h   Fri Oct 31 13:19:44 2014 UTC
@@ -16,30 +16,29 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/source-position.h"
-// TODO(titzer): don't include the macro-assembler?
-#include "src/macro-assembler.h"
 #include "src/zone-allocator.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

-// Forward declarations.
-class Linkage;
-
 // A couple of reserved opcodes are used for internal use.
 const InstructionCode kGapInstruction = -1;
 const InstructionCode kBlockStartInstruction = -2;
 const InstructionCode kSourcePositionInstruction = -3;

+// Platform independent maxes.
+static const int kMaxGeneralRegisters = 32;
+static const int kMaxDoubleRegisters = 32;

-#define INSTRUCTION_OPERAND_LIST(V)              \
-  V(Constant, CONSTANT, 0)                       \
-  V(Immediate, IMMEDIATE, 0)                     \
-  V(StackSlot, STACK_SLOT, 128)                  \
-  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
-  V(Register, REGISTER, Register::kNumRegisters) \
-  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+#define INSTRUCTION_OPERAND_LIST(V)           \
+  V(Constant, CONSTANT, 0)                    \
+  V(Immediate, IMMEDIATE, 0)                  \
+  V(StackSlot, STACK_SLOT, 128)               \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
+  V(Register, REGISTER, kMaxGeneralRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, kMaxDoubleRegisters)

 class InstructionOperand : public ZoneObject {
  public:
@@ -676,8 +675,10 @@
   Type type() const { return type_; }

   int32_t ToInt32() const {
-    DCHECK_EQ(kInt32, type());
-    return static_cast<int32_t>(value_);
+    DCHECK(type() == kInt32 || type() == kInt64);
+    const int32_t value = static_cast<int32_t>(value_);
+    DCHECK_EQ(value_, static_cast<int64_t>(value));
+    return value;
   }

   int64_t ToInt64() const {
@@ -843,21 +844,16 @@
 typedef ZoneDeque<PointerMap*> PointerMapDeque;
 typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
 typedef ZoneVector<InstructionBlock*> InstructionBlocks;
-typedef IntVector NodeToVregMap;

// Represents architecture-specific generated code before, during, and after
 // register allocation.
 // TODO(titzer): s/IsDouble/IsFloat64/
 class InstructionSequence FINAL {
  public:
-  static const int kNodeUnmapped = -1;
-
- InstructionSequence(Zone* zone, const Graph* graph, const Schedule* schedule);
+  InstructionSequence(Zone* zone, const Schedule* schedule);

   int NextVirtualRegister() { return next_virtual_register_++; }
   int VirtualRegisterCount() const { return next_virtual_register_; }
-
-  int node_count() const { return static_cast<int>(node_map_.size()); }

   const InstructionBlocks& instruction_blocks() const {
     return instruction_blocks_;
@@ -882,9 +878,6 @@
   }

   const InstructionBlock* GetInstructionBlock(int instruction_index) const;
-
-  int GetVirtualRegister(const Node* node);
-  const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }

   bool IsReference(int virtual_register) const;
   bool IsDouble(int virtual_register) const;
@@ -920,8 +913,8 @@
   void StartBlock(BasicBlock* block);
   void EndBlock(BasicBlock* block);

-  int AddConstant(Node* node, Constant constant) {
-    int virtual_register = GetVirtualRegister(node);
+  int AddConstant(int virtual_register, Constant constant) {
+ DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
     DCHECK(constants_.find(virtual_register) == constants_.end());
     constants_.insert(std::make_pair(virtual_register, constant));
     return virtual_register;
@@ -968,7 +961,6 @@
typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;

   Zone* const zone_;
-  NodeToVregMap node_map_;
   InstructionBlocks instruction_blocks_;
   ConstantMap constants_;
   ConstantDeque immediates_;
=======================================
--- /trunk/src/compiler/js-builtin-reducer.cc   Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.cc   Fri Oct 31 13:19:44 2014 UTC
@@ -189,6 +189,32 @@
   }
   return NoChange();
 }
+
+
+// ES6 draft 10-14-14, section 20.2.2.16.
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  if (!machine()->HasFloat64Floor()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.floor(a:number) -> Float64Floor(a)
+    Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 10-14-14, section 20.2.2.10.
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  if (!machine()->HasFloat64Ceil()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.ceil(a:number) -> Float64Ceil(a)
+    Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}


 Reduction JSBuiltinReducer::Reduce(Node* node) {
@@ -207,6 +233,10 @@
       return ReplaceWithPureReduction(node, ReduceMathImul(node));
     case kMathFround:
       return ReplaceWithPureReduction(node, ReduceMathFround(node));
+    case kMathFloor:
+      return ReplaceWithPureReduction(node, ReduceMathFloor(node));
+    case kMathCeil:
+      return ReplaceWithPureReduction(node, ReduceMathCeil(node));
     default:
       break;
   }
=======================================
--- /trunk/src/compiler/js-builtin-reducer.h    Mon Sep 29 00:04:53 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.h    Fri Oct 31 13:19:44 2014 UTC
@@ -35,6 +35,8 @@
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathFloor(Node* node);
+  Reduction ReduceMathCeil(Node* node);

   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
=======================================
--- /trunk/src/compiler/js-graph.cc     Fri Oct 17 11:41:30 2014 UTC
+++ /trunk/src/compiler/js-graph.cc     Fri Oct 31 13:19:44 2014 UTC
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include "src/code-stubs.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/typer.h"
=======================================
--- /trunk/src/compiler/linkage-impl.h  Tue Sep  9 00:05:04 2014 UTC
+++ /trunk/src/compiler/linkage-impl.h  Fri Oct 31 13:19:44 2014 UTC
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_LINKAGE_IMPL_H_
 #define V8_COMPILER_LINKAGE_IMPL_H_

+#include "src/code-stubs.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -129,8 +131,8 @@

   // TODO(turbofan): cache call descriptors for code stub calls.
   static CallDescriptor* GetStubCallDescriptor(
- Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
-      CallDescriptor::Flags flags) {
+      Zone* zone, const CallInterfaceDescriptor& descriptor,
+      int stack_parameter_count, CallDescriptor::Flags flags) {
     const int register_parameter_count =
         descriptor.GetEnvironmentParameterCount();
     const int js_parameter_count =
=======================================
--- /trunk/src/compiler/linkage.cc      Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/linkage.cc      Fri Oct 31 13:19:44 2014 UTC
@@ -2,10 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "src/compiler/linkage.h"
-
 #include "src/code-stubs.h"
 #include "src/compiler.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node.h"
 #include "src/compiler/pipeline.h"
 #include "src/scopes.h"
@@ -102,7 +101,7 @@


 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags) const {
return GetStubCallDescriptor(descriptor, stack_parameter_count, flags, zone_);
 }
@@ -233,7 +232,7 @@


 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   UNIMPLEMENTED();
   return NULL;
=======================================
--- /trunk/src/compiler/linkage.h       Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/linkage.h       Fri Oct 31 13:19:44 2014 UTC
@@ -6,15 +6,16 @@
 #define V8_COMPILER_LINKAGE_H_

 #include "src/base/flags.h"
-#include "src/code-stubs.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/machine-type.h"
-#include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/zone.h"

 namespace v8 {
 namespace internal {
+
+class CallInterfaceDescriptor;
+
 namespace compiler {

 // Describes the location for a parameter or a return value to a call.
@@ -183,10 +184,10 @@
       Operator::Properties properties, Zone* zone);

   CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
+ const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
       CallDescriptor::Flags flags = CallDescriptor::kNoFlags) const;
   static CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
       CallDescriptor::Flags flags, Zone* zone);

   // Creates a call descriptor for simplified C calls that is appropriate
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc Fri Oct 31 13:19:44 2014 UTC
@@ -370,15 +370,16 @@
       if (m.left().IsWord32Sar() && m.right().HasValue()) {
         Int32BinopMatcher mleft(m.left().node());
         if (mleft.right().HasValue()) {
-          // (x >> K) < C => x < (C << K) | (2^K - 1)
+          // (x >> K) < C => x < (C << K)
           // when C < (M >> K)
           const uint32_t c = m.right().Value();
           const uint32_t k = mleft.right().Value() & 0x1f;
           if (c < static_cast<uint32_t>(kMaxInt >> k)) {
             node->ReplaceInput(0, mleft.left().node());
- node->ReplaceInput(1, Uint32Constant((c << k) | ((1 << k) - 1)));
+            node->ReplaceInput(1, Uint32Constant(c << k));
             return Changed(node);
           }
+          // TODO(turbofan): else the comparison is always true.
         }
       }
       break;
=======================================
***Additional files exist in this changeset.***

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to