Revision: 25239
Author: [email protected]
Date: Mon Nov 10 10:45:50 2014 UTC
Log: Version 3.31.0 (based on 50a829b3cfe8ec0b7ccd1b7e154e632c9a73e5f0)
Classes: Partial fix for constructor not calling super (issues 3661, 3672).
Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=25239
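
For context, a minimal sketch of the behaviour the class-constructor log line
refers to (an assumed illustration, not code taken from issues 3661/3672): a
derived class with no constructor of its own gets an implicit default
constructor, and that default constructor is expected to forward its arguments
to super(), as exercised by test/mjsunit/harmony/classes.js.

  "use strict";
  class Base {
    constructor(x) { this.x = x; }
  }
  class Derived extends Base {}  // no explicit constructor
  // The implicit default constructor should behave like
  //   constructor(...args) { super(...args); }
  // so the Base constructor runs and d.x should be 42.
  var d = new Derived(42);
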
Added:
/trunk/test/mjsunit/asm/sign-extend.js
/trunk/test/mjsunit/asm/zero-extend.js
/trunk/test/mjsunit/compiler/regress-uint8-deopt.js
/trunk/test/unittests/compiler/node-matchers-unittest.cc
/trunk/tools/trace-maps-processor.py
Modified:
/trunk/Makefile
/trunk/build/features.gypi
/trunk/build/toolchain.gypi
/trunk/include/v8.h
/trunk/src/api.cc
/trunk/src/arm/assembler-arm.cc
/trunk/src/arm/assembler-arm.h
/trunk/src/arm/code-stubs-arm.cc
/trunk/src/arm/codegen-arm.cc
/trunk/src/arm/disasm-arm.cc
/trunk/src/arm/simulator-arm.cc
/trunk/src/arm64/assembler-arm64.cc
/trunk/src/arm64/code-stubs-arm64.cc
/trunk/src/arm64/lithium-codegen-arm64.cc
/trunk/src/arm64/macro-assembler-arm64-inl.h
/trunk/src/arm64/macro-assembler-arm64.cc
/trunk/src/arm64/macro-assembler-arm64.h
/trunk/src/ast.h
/trunk/src/base/platform/platform-win32.cc
/trunk/src/bootstrapper.cc
/trunk/src/code-stubs.h
/trunk/src/compiler/arm/code-generator-arm.cc
/trunk/src/compiler/arm/instruction-codes-arm.h
/trunk/src/compiler/arm/instruction-selector-arm.cc
/trunk/src/compiler/arm64/instruction-selector-arm64.cc
/trunk/src/compiler/code-generator.cc
/trunk/src/compiler/js-generic-lowering.cc
/trunk/src/compiler/js-graph.cc
/trunk/src/compiler/js-graph.h
/trunk/src/compiler/linkage.cc
/trunk/src/compiler/node-matchers.h
/trunk/src/compiler/schedule.h
/trunk/src/compiler/scheduler.cc
/trunk/src/compiler/x64/instruction-selector-x64.cc
/trunk/src/contexts.cc
/trunk/src/contexts.h
/trunk/src/factory.cc
/trunk/src/factory.h
/trunk/src/flag-definitions.h
/trunk/src/full-codegen.cc
/trunk/src/globals.h
/trunk/src/heap/gc-idle-time-handler.cc
/trunk/src/heap/gc-idle-time-handler.h
/trunk/src/heap/gc-tracer.cc
/trunk/src/heap/gc-tracer.h
/trunk/src/heap/heap.cc
/trunk/src/heap/heap.h
/trunk/src/hydrogen-instructions.h
/trunk/src/ia32/code-stubs-ia32.cc
/trunk/src/ic/ic.cc
/trunk/src/isolate.cc
/trunk/src/isolate.h
/trunk/src/mips/code-stubs-mips.cc
/trunk/src/mips64/code-stubs-mips64.cc
/trunk/src/objects-inl.h
/trunk/src/objects-printer.cc
/trunk/src/objects.cc
/trunk/src/objects.h
/trunk/src/parser.cc
/trunk/src/parser.h
/trunk/src/preparser.h
/trunk/src/runtime/runtime-classes.cc
/trunk/src/runtime/runtime-debug.cc
/trunk/src/runtime/runtime-literals.cc
/trunk/src/runtime/runtime-object.cc
/trunk/src/runtime/runtime-scopes.cc
/trunk/src/scanner.cc
/trunk/src/scanner.h
/trunk/src/version.cc
/trunk/src/x64/code-stubs-x64.cc
/trunk/src/x87/code-stubs-x87.cc
/trunk/src/x87/lithium-codegen-x87.cc
/trunk/test/cctest/cctest.status
/trunk/test/cctest/compiler/test-scheduler.cc
/trunk/test/cctest/test-assembler-arm.cc
/trunk/test/cctest/test-decls.cc
/trunk/test/cctest/test-disasm-arm.cc
/trunk/test/cctest/test-parsing.cc
/trunk/test/mjsunit/compiler/division-by-constant.js
/trunk/test/mjsunit/harmony/classes.js
/trunk/test/mjsunit/regress/regress-136048.js
/trunk/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
/trunk/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
/trunk/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
/trunk/test/unittests/heap/gc-idle-time-handler-unittest.cc
/trunk/test/unittests/unittests.gyp
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/sign-extend.js Mon Nov 10 10:45:50 2014 UTC
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var sext8 = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function sext8(i) {
+ i = i|0;
+ i = i << 24 >> 24;
+ return i|0;
+ }
+ return { sext8: sext8 };
+})(stdlib, foreign, buffer).sext8;
+
+assertEquals(-128, sext8(128));
+assertEquals(-1, sext8(-1));
+assertEquals(-1, sext8(255));
+assertEquals(0, sext8(0));
+assertEquals(0, sext8(256));
+assertEquals(42, sext8(42));
+assertEquals(127, sext8(127));
+
+
+var sext16 = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function sext16(i) {
+ i = i|0;
+ i = i << 16 >> 16;
+ return i|0;
+ }
+ return { sext16: sext16 };
+})(stdlib, foreign, buffer).sext16;
+
+assertEquals(-32768, sext16(32768));
+assertEquals(-1, sext16(-1));
+assertEquals(-1, sext16(65535));
+assertEquals(0, sext16(0));
+assertEquals(0, sext16(65536));
+assertEquals(128, sext16(128));
+assertEquals(32767, sext16(32767));
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/zero-extend.js Mon Nov 10 10:45:50 2014 UTC
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var zext8 = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function zext8(i) {
+ i = i|0;
+ return i & 0xff;
+ }
+ return { zext8: zext8 };
+})(stdlib, foreign, buffer).zext8;
+
+assertEquals(0, zext8(0));
+assertEquals(0, zext8(0x100));
+assertEquals(0xff, zext8(-1));
+assertEquals(0xff, zext8(0xff));
+
+
+var zext16 = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function zext16(i) {
+ i = i|0;
+ return i & 0xffff;
+ }
+ return { zext16: zext16 };
+})(stdlib, foreign, buffer).zext16;
+
+assertEquals(0, zext16(0));
+assertEquals(0, zext16(0x10000));
+assertEquals(0xffff, zext16(-1));
+assertEquals(0xffff, zext16(0xffff));
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/compiler/regress-uint8-deopt.js Mon Nov 10 10:45:50 2014 UTC
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-asm --turbo-deoptimization --allow-natives-syntax
+
+function Module(heap) {
+ "use asm";
+ var a = new Uint8Array(heap);
+ function f() {
+ var x = a[0] | 0;
+ %DeoptimizeFunction(f);
+ return x;
+ }
+ return f;
+}
+assertEquals(0, Module(new ArrayBuffer(1))());
=======================================
--- /dev/null
+++ /trunk/test/unittests/compiler/node-matchers-unittest.cc Mon Nov 10 10:45:50 2014 UTC
@@ -0,0 +1,317 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeMatcherTest : public GraphTest {
+ public:
+ NodeMatcherTest() {}
+ virtual ~NodeMatcherTest() {}
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+namespace {
+
+void CheckScaledWithOffsetMatch(ScaledWithOffsetMatcher* matcher, Node* scaled,
+ int scale_exponent, Node* offset,
+ Node* constant) {
+ EXPECT_TRUE(matcher->matches());
+ EXPECT_EQ(scaled, matcher->scaled());
+ EXPECT_EQ(scale_exponent, matcher->scale_exponent());
+ EXPECT_EQ(offset, matcher->offset());
+ EXPECT_EQ(constant, matcher->constant());
+}
+};
+
+
+TEST_F(NodeMatcherTest, ScaledWithOffsetMatcher) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+ const Operator* c0_op = common()->Int32Constant(0);
+ Node* c0 = graph()->NewNode(c0_op);
+ USE(c0);
+ const Operator* c1_op = common()->Int32Constant(1);
+ Node* c1 = graph()->NewNode(c1_op);
+ USE(c1);
+ const Operator* c2_op = common()->Int32Constant(2);
+ Node* c2 = graph()->NewNode(c2_op);
+ USE(c2);
+ const Operator* c3_op = common()->Int32Constant(3);
+ Node* c3 = graph()->NewNode(c3_op);
+ USE(c3);
+ const Operator* c4_op = common()->Int32Constant(4);
+ Node* c4 = graph()->NewNode(c4_op);
+ USE(c4);
+ const Operator* c8_op = common()->Int32Constant(8);
+ Node* c8 = graph()->NewNode(c8_op);
+ USE(c8);
+
+ const Operator* o0_op = common()->Parameter(0);
+ Node* o0 = graph()->NewNode(o0_op, graph()->start());
+ USE(o0);
+ const Operator* o1_op = common()->Parameter(1);
+ Node* o1 = graph()->NewNode(o1_op, graph()->start());
+ USE(o0);
+
+ const Operator* p1_op = common()->Parameter(3);
+ Node* p1 = graph()->NewNode(p1_op, graph()->start());
+ USE(p1);
+
+ const Operator* a_op = machine()->Int32Add();
+ USE(a_op);
+
+ const Operator* m_op = machine()->Int32Mul();
+ Node* m1 = graph()->NewNode(m_op, p1, c1);
+ Node* m2 = graph()->NewNode(m_op, p1, c2);
+ Node* m4 = graph()->NewNode(m_op, p1, c4);
+ Node* m8 = graph()->NewNode(m_op, p1, c8);
+ Node* m3 = graph()->NewNode(m_op, p1, c3);
+
+ const Operator* s_op = machine()->Word32Shl();
+ Node* s0 = graph()->NewNode(s_op, p1, c0);
+ Node* s1 = graph()->NewNode(s_op, p1, c1);
+ Node* s2 = graph()->NewNode(s_op, p1, c2);
+ Node* s3 = graph()->NewNode(s_op, p1, c3);
+ Node* s4 = graph()->NewNode(s_op, p1, c4);
+
+ // 1 INPUT
+
+ // Only relevant test case is checking for non-match.
+ ScaledWithOffsetMatcher match0(c0);
+ EXPECT_FALSE(match0.matches());
+
+ // 2 INPUT
+
+ // (O0 + O1) -> [O0, 0, O1, NULL]
+ ScaledWithOffsetMatcher match1(graph()->NewNode(a_op, o0, o1));
+ CheckScaledWithOffsetMatch(&match1, o1, 0, o0, NULL);
+
+ // (O0 + C0) -> [NULL, 0, O0, C0]
+ ScaledWithOffsetMatcher match2(graph()->NewNode(a_op, o0, c0));
+ CheckScaledWithOffsetMatch(&match2, NULL, 0, o0, c0);
+
+ // (C0 + O0) -> [NULL, 0, O0, C0]
+ ScaledWithOffsetMatcher match3(graph()->NewNode(a_op, c0, o0));
+ CheckScaledWithOffsetMatch(&match3, NULL, 0, o0, c0);
+
+ // (O0 + M1) -> [p1, 0, O0, NULL]
+ ScaledWithOffsetMatcher match4(graph()->NewNode(a_op, o0, m1));
+ CheckScaledWithOffsetMatch(&match4, p1, 0, o0, NULL);
+
+ // (M1 + O0) -> [p1, 0, O0, NULL]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match5(graph()->NewNode(a_op, m1, o0));
+ CheckScaledWithOffsetMatch(&match5, p1, 0, o0, NULL);
+
+ // (C0 + M1) -> [P1, 0, NULL, C0]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match6(graph()->NewNode(a_op, c0, m1));
+ CheckScaledWithOffsetMatch(&match6, p1, 0, NULL, c0);
+
+ // (M1 + C0) -> [P1, 0, NULL, C0]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match7(graph()->NewNode(a_op, m1, c0));
+ CheckScaledWithOffsetMatch(&match7, p1, 0, NULL, c0);
+
+ // (O0 + S0) -> [p1, 0, O0, NULL]
+ ScaledWithOffsetMatcher match8(graph()->NewNode(a_op, o0, s0));
+ CheckScaledWithOffsetMatch(&match8, p1, 0, o0, NULL);
+
+ // (S0 + O0) -> [p1, 0, O0, NULL]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match9(graph()->NewNode(a_op, s0, o0));
+ CheckScaledWithOffsetMatch(&match9, p1, 0, o0, NULL);
+
+ // (C0 + S0) -> [P1, 0, NULL, C0]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match10(graph()->NewNode(a_op, c0, s0));
+ CheckScaledWithOffsetMatch(&match10, p1, 0, NULL, c0);
+
+ // (S0 + C0) -> [P1, 0, NULL, C0]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match11(graph()->NewNode(a_op, s0, c0));
+ CheckScaledWithOffsetMatch(&match11, p1, 0, NULL, c0);
+
+ // (O0 + M2) -> [p1, 1, O0, NULL]
+ ScaledWithOffsetMatcher match12(graph()->NewNode(a_op, o0, m2));
+ CheckScaledWithOffsetMatch(&match12, p1, 1, o0, NULL);
+
+ // (M2 + O0) -> [p1, 1, O0, NULL]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match13(graph()->NewNode(a_op, m2, o0));
+ CheckScaledWithOffsetMatch(&match13, p1, 1, o0, NULL);
+
+ // (C0 + M2) -> [P1, 1, NULL, C0]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match14(graph()->NewNode(a_op, c0, m2));
+ CheckScaledWithOffsetMatch(&match14, p1, 1, NULL, c0);
+
+ // (M2 + C0) -> [P1, 1, NULL, C0]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match15(graph()->NewNode(a_op, m2, c0));
+ CheckScaledWithOffsetMatch(&match15, p1, 1, NULL, c0);
+
+ // (O0 + S1) -> [p1, 1, O0, NULL]
+ ScaledWithOffsetMatcher match16(graph()->NewNode(a_op, o0, s1));
+ CheckScaledWithOffsetMatch(&match16, p1, 1, o0, NULL);
+
+ // (S1 + O0) -> [p1, 1, O0, NULL]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match17(graph()->NewNode(a_op, s1, o0));
+ CheckScaledWithOffsetMatch(&match17, p1, 1, o0, NULL);
+
+ // (C0 + S1) -> [P1, 1, NULL, C0]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match18(graph()->NewNode(a_op, c0, s1));
+ CheckScaledWithOffsetMatch(&match18, p1, 1, NULL, c0);
+
+ // (S1 + C0) -> [P1, 1, NULL, C0]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match19(graph()->NewNode(a_op, s1, c0));
+ CheckScaledWithOffsetMatch(&match19, p1, 1, NULL, c0);
+
+ // (O0 + M4) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match20(graph()->NewNode(a_op, o0, m4));
+ CheckScaledWithOffsetMatch(&match20, p1, 2, o0, NULL);
+
+ // (M4 + O0) -> [p1, 2, O0, NULL]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match21(graph()->NewNode(a_op, m4, o0));
+ CheckScaledWithOffsetMatch(&match21, p1, 2, o0, NULL);
+
+ // (C0 + M4) -> [p1, 2, NULL, C0]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match22(graph()->NewNode(a_op, c0, m4));
+ CheckScaledWithOffsetMatch(&match22, p1, 2, NULL, c0);
+
+ // (M4 + C0) -> [p1, 2, NULL, C0]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match23(graph()->NewNode(a_op, m4, c0));
+ CheckScaledWithOffsetMatch(&match23, p1, 2, NULL, c0);
+
+ // (O0 + S2) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match24(graph()->NewNode(a_op, o0, s2));
+ CheckScaledWithOffsetMatch(&match24, p1, 2, o0, NULL);
+
+ // (S2 + O0) -> [p1, 2, O0, NULL]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match25(graph()->NewNode(a_op, s2, o0));
+ CheckScaledWithOffsetMatch(&match25, p1, 2, o0, NULL);
+
+ // (C0 + S2) -> [p1, 2, NULL, C0]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match26(graph()->NewNode(a_op, c0, s2));
+ CheckScaledWithOffsetMatch(&match26, p1, 2, NULL, c0);
+
+ // (S2 + C0) -> [p1, 2, NULL, C0]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match27(graph()->NewNode(a_op, s2, c0));
+ CheckScaledWithOffsetMatch(&match27, p1, 2, NULL, c0);
+
+ // (O0 + M8) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match28(graph()->NewNode(a_op, o0, m8));
+ CheckScaledWithOffsetMatch(&match28, p1, 3, o0, NULL);
+
+ // (M8 + O0) -> [p1, 2, O0, NULL]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match29(graph()->NewNode(a_op, m8, o0));
+ CheckScaledWithOffsetMatch(&match29, p1, 3, o0, NULL);
+
+ // (C0 + M8) -> [p1, 2, NULL, C0]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match30(graph()->NewNode(a_op, c0, m8));
+ CheckScaledWithOffsetMatch(&match30, p1, 3, NULL, c0);
+
+ // (M8 + C0) -> [p1, 2, NULL, C0]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match31(graph()->NewNode(a_op, m8, c0));
+ CheckScaledWithOffsetMatch(&match31, p1, 3, NULL, c0);
+
+ // (O0 + S3) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match32(graph()->NewNode(a_op, o0, s3));
+ CheckScaledWithOffsetMatch(&match32, p1, 3, o0, NULL);
+
+ // (S3 + O0) -> [p1, 2, O0, NULL]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match33(graph()->NewNode(a_op, s3, o0));
+ CheckScaledWithOffsetMatch(&match33, p1, 3, o0, NULL);
+
+ // (C0 + S3) -> [p1, 2, NULL, C0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match34(graph()->NewNode(a_op, c0, s3));
+ CheckScaledWithOffsetMatch(&match34, p1, 3, NULL, c0);
+
+ // (S3 + C0) -> [p1, 2, NULL, C0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match35(graph()->NewNode(a_op, s3, c0));
+ CheckScaledWithOffsetMatch(&match35, p1, 3, NULL, c0);
+
+ // 2 INPUT - NEGATIVE CASES
+
+ // (M3 + O1) -> [O0, 0, M3, NULL]
+ ScaledWithOffsetMatcher match36(graph()->NewNode(a_op, o1, m3));
+ CheckScaledWithOffsetMatch(&match36, m3, 0, o1, NULL);
+
+ // (S4 + O1) -> [O0, 0, S4, NULL]
+ ScaledWithOffsetMatcher match37(graph()->NewNode(a_op, o1, s4));
+ CheckScaledWithOffsetMatch(&match37, s4, 0, o1, NULL);
+
+ // 3 INPUT
+
+ // (C0 + S3) + O0 -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match38(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, c0, s3), o0));
+ CheckScaledWithOffsetMatch(&match38, p1, 3, o0, c0);
+
+ // (O0 + C0) + S3 -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match39(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, o0, c0), s3));
+ CheckScaledWithOffsetMatch(&match39, p1, 3, o0, c0);
+
+ // (S3 + O0) + C0 -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match40(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, s3, o0), c0));
+ CheckScaledWithOffsetMatch(&match40, p1, 3, o0, c0);
+
+ // C0 + (S3 + O0) -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match41(
+ graph()->NewNode(a_op, c0, graph()->NewNode(a_op, s3, o0)));
+ CheckScaledWithOffsetMatch(&match41, p1, 3, o0, c0);
+
+ // O0 + (C0 + S3) -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match42(
+ graph()->NewNode(a_op, o0, graph()->NewNode(a_op, c0, s3)));
+ CheckScaledWithOffsetMatch(&match42, p1, 3, o0, c0);
+
+ // S3 + (O0 + C0) -> [p1, 2, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match43(
+ graph()->NewNode(a_op, s3, graph()->NewNode(a_op, o0, c0)));
+ CheckScaledWithOffsetMatch(&match43, p1, 3, o0, c0);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /trunk/tools/trace-maps-processor.py Mon Nov 10 10:45:50 2014 UTC
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+
+action = sys.argv[1]
+
+if action in ["help", "-h", "--help"] or len(sys.argv) != 3:
+ print("Usage: %s <action> <inputfile>, where action can be: \n"
+ "help Print this message\n"
+ "plain Print ASCII tree to stdout\n"
+ "dot Print dot file to stdout\n"
+ "count Count most frequent transition reasons\n" % sys.argv[0])
+ sys.exit(0)
+
+
+filename = sys.argv[2]
+maps = {}
+root_maps = []
+transitions = {}
+annotations = {}
+
+
+class Map(object):
+
+ def __init__(self, pointer, origin):
+ self.pointer = pointer
+ self.origin = origin
+
+ def __str__(self):
+ return "%s (%s)" % (self.pointer, self.origin)
+
+
+class Transition(object):
+
+ def __init__(self, from_map, to_map, reason):
+ self.from_map = from_map
+ self.to_map = to_map
+ self.reason = reason
+
+
+def RegisterNewMap(raw_map):
+ if raw_map in annotations:
+ annotations[raw_map] += 1
+ else:
+ annotations[raw_map] = 0
+ return AnnotateExistingMap(raw_map)
+
+
+def AnnotateExistingMap(raw_map):
+ return "%s_%d" % (raw_map, annotations[raw_map])
+
+
+def AddMap(pointer, origin):
+ pointer = RegisterNewMap(pointer)
+ maps[pointer] = Map(pointer, origin)
+ return pointer
+
+
+def AddTransition(from_map, to_map, reason):
+ from_map = AnnotateExistingMap(from_map)
+ to_map = AnnotateExistingMap(to_map)
+ if from_map not in transitions:
+ transitions[from_map] = {}
+ targets = transitions[from_map]
+ if to_map in targets:
+ # Some events get printed twice, that's OK. In some cases, ignore the
+ # second output...
+ old_reason = targets[to_map].reason
+ if old_reason.startswith("ReplaceDescriptors"):
+ return
+ # ...and in others use it for additional detail.
+ if reason in []:
+ targets[to_map].reason = reason
+ return
+ # Unexpected duplicate events? Warn.
+    print("// warning: already have a transition from %s to %s, reason: %s" %
+ (from_map, to_map, targets[to_map].reason))
+ return
+ targets[to_map] = Transition(from_map, to_map, reason)
+
+
+with open(filename, "r") as f:
+ last_to_map = ""
+ for line in f:
+ if not line.startswith("[TraceMaps: "): continue
+ words = line.split(" ")
+ event = words[1]
+ if event == "InitialMap":
+ assert words[2] == "map="
+ assert words[4] == "SFI="
+ new_map = AddMap(words[3], "SFI#%s" % words[5])
+ root_maps.append(new_map)
+ continue
+ if words[2] == "from=" and words[4] == "to=":
+ from_map = words[3]
+ to_map = words[5]
+ if from_map not in annotations:
+ print("// warning: unknown from_map %s" % from_map)
+ new_map = AddMap(from_map, "<unknown>")
+ root_maps.append(new_map)
+ if to_map != last_to_map:
+ AddMap(to_map, "<transition> (%s)" % event)
+ last_to_map = to_map
+ if event in ["Transition", "NoTransition"]:
+ assert words[6] == "name=", line
+ reason = "%s: %s" % (event, words[7])
+ elif event in ["Normalize", "ReplaceDescriptors", "SlowToFast"]:
+ assert words[6] == "reason=", line
+ reason = "%s: %s" % (event, words[7])
+ if words[8].strip() != "]":
+ reason = "%s_%s" % (reason, words[8])
+ else:
+ reason = event
+ AddTransition(from_map, to_map, reason)
+ continue
+
+
+def PlainPrint(m, indent, label):
+ print("%s%s (%s)" % (indent, m, label))
+ if m in transitions:
+ for t in transitions[m]:
+ PlainPrint(t, indent + " ", transitions[m][t].reason)
+
+
+def CountTransitions(m):
+ if m not in transitions: return 0
+ return len(transitions[m])
+
+
+def DotPrint(m, label):
+ print("m%s [label=\"%s\"]" % (m[2:], label))
+ if m in transitions:
+ for t in transitions[m]:
+ # GraphViz doesn't like node labels looking like numbers, so use
+ # "m..." instead of "0x...".
+ print("m%s -> m%s" % (m[2:], t[2:]))
+ reason = transitions[m][t].reason
+ reason = reason.replace("\\", "BACKSLASH")
+ reason = reason.replace("\"", "\\\"")
+ DotPrint(t, reason)
+
+
+if action == "plain":
+ root_maps = sorted(root_maps, key=CountTransitions, reverse=True)
+ for m in root_maps:
+ PlainPrint(m, "", maps[m].origin)
+
+elif action == "dot":
+ print("digraph g {")
+ for m in root_maps:
+ DotPrint(m, maps[m].origin)
+ print("}")
+
+elif action == "count":
+ reasons = {}
+ for s in transitions:
+ for t in transitions[s]:
+ reason = transitions[s][t].reason
+ if reason not in reasons:
+ reasons[reason] = 1
+ else:
+ reasons[reason] += 1
+ reasons_list = []
+ for r in reasons:
+ reasons_list.append("%8d %s" % (reasons[r], r))
+ reasons_list.sort(reverse=True)
+ for r in reasons_list[:20]:
+ print r
=======================================
--- /trunk/Makefile Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/Makefile Mon Nov 10 10:45:50 2014 UTC
@@ -64,6 +64,10 @@
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1
endif
+# tracemaps=on
+ifeq ($(tracemaps), on)
+ GYPFLAGS += -Dv8_trace_maps=1
+endif
# backtrace=off
ifeq ($(backtrace), off)
GYPFLAGS += -Dv8_enable_backtrace=0
=======================================
--- /trunk/build/features.gypi Mon Jul 7 00:05:07 2014 UTC
+++ /trunk/build/features.gypi Mon Nov 10 10:45:50 2014 UTC
@@ -39,6 +39,8 @@
'v8_enable_verify_heap%': 0,
+ 'v8_trace_maps%': 0,
+
'v8_use_snapshot%': 'true',
'v8_enable_verify_predictable%': 0,
@@ -77,6 +79,9 @@
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
+ ['v8_trace_maps==1', {
+ 'defines': ['TRACE_MAPS',],
+ }],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
}],
=======================================
--- /trunk/build/toolchain.gypi Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/build/toolchain.gypi Mon Nov 10 10:45:50 2014 UTC
@@ -852,7 +852,8 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
- 'DEBUG'
+ 'DEBUG',
+ 'TRACE_MAPS'
],
'conditions': [
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
=======================================
--- /trunk/include/v8.h Wed Nov 5 01:04:48 2014 UTC
+++ /trunk/include/v8.h Mon Nov 10 10:45:50 2014 UTC
@@ -6110,7 +6110,7 @@
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 154;
+ static const int kEmptyStringRootIndex = 155;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
=======================================
--- /trunk/src/api.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/api.cc Mon Nov 10 10:45:50 2014 UTC
@@ -3636,7 +3636,9 @@
i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
false);
if (result->IsUndefined()) return false;
- if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0);
+ if (fast) {
+ i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
+ }
return true;
}
@@ -3822,7 +3824,8 @@
// as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
- i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
+ i::Handle<i::Map> new_map =
+ i::Map::Copy(i::Handle<i::Map>(obj->map()), "APITurnOnAccessCheck");
new_map->set_is_access_check_needed(true);
i::JSObject::MigrateToMap(obj, new_map);
}
=======================================
--- /trunk/src/arm/assembler-arm.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/arm/assembler-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -1798,71 +1798,119 @@
}
-void Assembler::uxtb(Register dst,
- const Operand& src,
- Condition cond) {
+void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.233.
+ // cond(31-28) | 01101010(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.233.
+ // cond(31-28) | 01101010(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.235.
+ // cond(31-28) | 01101011(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.235.
+ // cond(31-28) | 01101011(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
- DCHECK(!src.rm().is(pc));
- DCHECK(!src.rm().is(no_reg));
- DCHECK(src.rs().is(no_reg));
- DCHECK((src.shift_imm_ == 0) ||
- (src.shift_imm_ == 8) ||
- (src.shift_imm_ == 16) ||
- (src.shift_imm_ == 24));
- // Operand maps ROR #0 to LSL #0.
- DCHECK((src.shift_op() == ROR) ||
- ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
- emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
- ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+ DCHECK(!src.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-void Assembler::uxtab(Register dst,
- Register src1,
- const Operand& src2,
+void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
// cond(31-28) | 01101110(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
- DCHECK(!src2.rm().is(pc));
- DCHECK(!src2.rm().is(no_reg));
- DCHECK(src2.rs().is(no_reg));
- DCHECK((src2.shift_imm_ == 0) ||
- (src2.shift_imm_ == 8) ||
- (src2.shift_imm_ == 16) ||
- (src2.shift_imm_ == 24));
- // Operand maps ROR #0 to LSL #0.
- DCHECK((src2.shift_op() == ROR) ||
- ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
- emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
- ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+ DCHECK(!src2.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
-void Assembler::uxtb16(Register dst,
- const Operand& src,
- Condition cond) {
+void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
- DCHECK(!src.rm().is(pc));
- DCHECK(!src.rm().is(no_reg));
- DCHECK(src.rs().is(no_reg));
- DCHECK((src.shift_imm_ == 0) ||
- (src.shift_imm_ == 8) ||
- (src.shift_imm_ == 16) ||
- (src.shift_imm_ == 24));
- // Operand maps ROR #0 to LSL #0.
- DCHECK((src.shift_op() == ROR) ||
- ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
- emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
- ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+ DCHECK(!src.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.276.
+ // cond(31-28) | 01101111(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.273.
+ // cond(31-28) | 01101111(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.is(pc));
+ DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+ emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
+ ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
=======================================
--- /trunk/src/arm/assembler-arm.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/arm/assembler-arm.h Mon Nov 10 10:45:50 2014 UTC
@@ -1034,12 +1034,20 @@
void pkhtb(Register dst, Register src1, const Operand& src2,
Condition cond = al);
- void uxtb(Register dst, const Operand& src, Condition cond = al);
-
- void uxtab(Register dst, Register src1, const Operand& src2,
+ void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+ void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
+ void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+ void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
+ Condition cond = al);
- void uxtb16(Register dst, const Operand& src, Condition cond = al);
+ void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+ void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
+ Condition cond = al);
+ void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
+ void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+ void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
+ Condition cond = al);
// Status register access instructions
=======================================
--- /trunk/src/arm/code-stubs-arm.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/arm/code-stubs-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -2686,6 +2686,10 @@
void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
@@ -2724,37 +2728,70 @@
}
__ bind(&extra_checks_or_miss);
- Label miss;
+ Label uninitialized, miss;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &slow_start);
+
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ jmp(&miss);
+ }
+
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
+ __ b(eq, &uninitialized);
+
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r4);
+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+ __ b(ne, &miss);
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ // We have to update statistics for runtime profiling.
+ __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+ __ sub(r4, r4, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(r2, with_types_offset));
+ __ ldr(r4, FieldMemOperand(r2, generic_offset));
+ __ add(r4, r4, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(r2, generic_offset));
+ __ jmp(&slow_start);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(r1, &miss);
+
+ // Goto miss case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
__ b(eq, &miss);
- if (!FLAG_trace_ic) {
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r4);
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- __ ldr(r4, FieldMemOperand(r2, with_types_offset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, with_types_offset));
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- __ ldr(r4, FieldMemOperand(r2, generic_offset));
- __ add(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, generic_offset));
- __ jmp(&slow_start);
- }
+ // Update stats.
+ __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+ __ add(r4, r4, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(r2, with_types_offset));
+
+ // Store the function.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ // Update the write barrier.
+ __ mov(r5, r1);
+ __ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ jmp(&have_js_function);
- // We are here because tracing is on or we are going monomorphic.
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
__ bind(&miss);
GenerateMiss(masm);
=======================================
--- /trunk/src/arm/codegen-arm.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/arm/codegen-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -288,8 +288,8 @@
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ uxtb16(temp3, Operand(temp1, ROR, 0));
- __ uxtb16(temp4, Operand(temp1, ROR, 8));
+ __ uxtb16(temp3, temp1);
+ __ uxtb16(temp4, temp1, 8);
__ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
__ str(temp1, MemOperand(dest));
__ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
@@ -301,9 +301,9 @@
__ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
__ b(&not_two, cc);
__ ldrh(temp1, MemOperand(src, 2, PostIndex));
- __ uxtb(temp3, Operand(temp1, ROR, 8));
+ __ uxtb(temp3, temp1, 8);
__ mov(temp3, Operand(temp3, LSL, 16));
- __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+ __ uxtab(temp3, temp3, temp1);
__ str(temp3, MemOperand(dest, 4, PostIndex));
__ bind(&not_two);
__ ldrb(temp1, MemOperand(src), ne);
=======================================
--- /trunk/src/arm/disasm-arm.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/arm/disasm-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -1027,7 +1027,75 @@
UNREACHABLE();
break;
case 1:
- UNREACHABLE();
+ if (instr->Bits(9, 6) == 1) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -1054,36 +1122,70 @@
}
break;
case 3:
- if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
- if (instr->Bits(19, 16) == 0xF) {
- switch (instr->Bits(11, 10)) {
- case 0:
- Format(instr, "uxtb'cond 'rd, 'rm");
- break;
- case 1:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
- break;
- case 2:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
- break;
- case 3:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
- break;
+ if ((instr->Bits(9, 6) == 1)) {
+ if ((instr->Bit(20) == 0)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
}
} else {
- switch (instr->Bits(11, 10)) {
- case 0:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
- break;
- case 1:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
- break;
- case 2:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
- break;
- case 3:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
- break;
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
}
}
} else {
=======================================
--- /trunk/src/arm/simulator-arm.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/arm/simulator-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -2629,7 +2629,89 @@
UNIMPLEMENTED();
break;
case 1:
- UNIMPLEMENTED();
+ if (instr->Bits(9, 6) == 1) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Sxtb.
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, static_cast<int8_t>(rm_val));
+ } else {
+ // Sxtab.
+ int32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + static_cast<int8_t>(rm_val));
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Sxth.
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, static_cast<int16_t>(rm_val));
+ } else {
+ // Sxtah.
+ int32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + static_cast<int16_t>(rm_val));
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -2650,8 +2732,7 @@
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
- set_register(rd,
- (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
} else {
UNIMPLEMENTED();
}
@@ -2660,44 +2741,85 @@
}
break;
case 3:
- if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
- if (instr->Bits(19, 16) == 0xF) {
- // Uxtb.
- uint32_t rm_val = get_register(instr->RmValue());
- int32_t rotate = instr->Bits(11, 10);
- switch (rotate) {
- case 0:
- break;
- case 1:
- rm_val = (rm_val >> 8) | (rm_val << 24);
- break;
- case 2:
- rm_val = (rm_val >> 16) | (rm_val << 16);
- break;
- case 3:
- rm_val = (rm_val >> 24) | (rm_val << 8);
- break;
+ if ((instr->Bits(9, 6) == 1)) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + (rm_val & 0xFF));
}
- set_register(rd, (rm_val & 0xFF));
} else {
- // Uxtab.
- uint32_t rn_val = get_register(rn);
- uint32_t rm_val = get_register(instr->RmValue());
- int32_t rotate = instr->Bits(11, 10);
- switch (rotate) {
- case 0:
- break;
- case 1:
- rm_val = (rm_val >> 8) | (rm_val << 24);
- break;
- case 2:
- rm_val = (rm_val >> 16) | (rm_val << 16);
- break;
- case 3:
- rm_val = (rm_val >> 24) | (rm_val << 8);
- break;
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxth.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, (rm_val & 0xFFFF));
+ } else {
+ // Uxtah.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + (rm_val & 0xFFFF));
}
- set_register(rd, rn_val + (rm_val & 0xFF));
}
} else {
UNIMPLEMENTED();
=======================================
--- /trunk/src/arm64/assembler-arm64.cc Thu Oct 23 08:44:45 2014 UTC
+++ /trunk/src/arm64/assembler-arm64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -44,17 +44,8 @@
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
- if (cross_compile) {
- // Always align csp in cross compiled code - this is safe and ensures that
- // csp will always be aligned if it is enabled by probing at runtime.
- if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
- } else {
- base::CPU cpu;
- if (FLAG_enable_always_align_csp &&
- (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
- supported_ |= 1u << ALWAYS_ALIGN_CSP;
- }
- }
+ // AArch64 has no configuration options, no further probing is required.
+ supported_ = 0;
}
=======================================
--- /trunk/src/arm64/code-stubs-arm64.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/arm64/code-stubs-arm64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -3016,6 +3016,10 @@
// x1 - function
// x3 - slot id (Smi)
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
@@ -3064,35 +3068,72 @@
}
__ bind(&extra_checks_or_miss);
- Label miss;
+ Label uninitialized, miss;
__ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ jmp(&miss);
+ }
+
__ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
- if (!FLAG_trace_ic) {
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(x4);
- __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
- __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Subs(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ B(&slow_start);
- }
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(x4);
+ __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ // We have to update statistics for runtime profiling.
+ __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Subs(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
+ __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
+ __ B(&slow_start);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(function, &miss);
+
+ // Goto miss case if we do not have a function.
+ __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+ __ Cmp(function, x5);
+ __ B(eq, &miss);
+
+ // Update stats.
+ __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+
+ // Store the function.
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(function, MemOperand(x4, 0));
- // We are here because tracing is on or we are going monomorphic.
+ // Update the write barrier.
+ __ Mov(x5, function);
+ __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ B(&have_js_function);
+
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
__ bind(&miss);
GenerateMiss(masm);
@@ -4293,18 +4334,10 @@
}
-static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
- // The entry hook is a "BumpSystemStackPointer" instruction (sub),
- // followed by a "Push lr" instruction, followed by a call.
- unsigned int size =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
- // "BumpSystemStackPointer".
- size += kInstructionSize;
- }
- return size;
-}
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -4317,7 +4350,7 @@
__ Push(lr);
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- GetProfileEntryHookCallSize(masm));
+ kProfileEntryHookCallSize);
__ Pop(lr);
}
@@ -4335,7 +4368,7 @@
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
- __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+ __ Sub(x0, lr, kProfileEntryHookCallSize);
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
=======================================
--- /trunk/src/arm64/lithium-codegen-arm64.cc Tue Oct 21 12:48:28 2014 UTC
+++ /trunk/src/arm64/lithium-codegen-arm64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -557,11 +557,6 @@
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
-
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
=======================================
--- /trunk/src/arm64/macro-assembler-arm64-inl.h Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64-inl.h Mon Nov 10 10:45:50 2014 UTC
@@ -1244,14 +1244,7 @@
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_));
if (!TmpList()->IsEmpty()) {
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Sub(temp, StackPointer(), space);
- Bic(csp, temp, 0xf);
- } else {
- Sub(csp, StackPointer(), space);
- }
+ Sub(csp, StackPointer(), space);
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this
means
@@ -1288,11 +1281,7 @@
DCHECK(emit_debug_code());
DCHECK(!csp.Is(sp_));
{ InstructionAccurateScope scope(this);
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, StackPointer(), 0xf);
- } else {
- mov(csp, StackPointer());
- }
+ mov(csp, StackPointer());
}
AssertStackConsistency();
}
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.cc Tue Oct 14 07:51:07 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -1308,7 +1308,7 @@
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
// much code to be generated.
if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ if (csp.Is(StackPointer())) {
// Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
// can't check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.h Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.h Mon Nov 10 10:45:50 2014 UTC
@@ -761,9 +761,9 @@
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
- // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
- // enabled, then csp will be dereferenced to cause the processor
- // (or simulator) to abort if it is not properly aligned.
+ // If StackPointer() is the system stack pointer (csp), then csp will be
+ // dereferenced to cause the processor (or simulator) to abort if it is not
+ // properly aligned.
//
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
@@ -831,9 +831,7 @@
inline void BumpSystemStackPointer(const Operand& space);
// Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()). This function will ensure the
- // new value of the system stack pointer is remains aligned to 16 bytes, and
- // is lower than or equal to the value of the current stack pointer.
+ // pointer (according to StackPointer()).
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
=======================================
--- /trunk/src/ast.h Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/ast.h Mon Nov 10 10:45:50 2014 UTC
@@ -2576,6 +2576,12 @@
bool is_concise_method() {
return IsConciseMethod(FunctionKindBits::decode(bitfield_));
}
+ bool is_default_constructor() {
+ return IsDefaultConstructor(FunctionKindBits::decode(bitfield_));
+ }
+ bool is_default_constructor_call_super() {
+    return IsDefaultConstructorCallSuper(FunctionKindBits::decode(bitfield_));
+ }
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
@@ -2647,7 +2653,7 @@
class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 6, 3> {};
+ class FunctionKindBits : public BitField<FunctionKind, 6, 5> {};
};
=======================================
--- /trunk/src/base/platform/platform-win32.cc Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/base/platform/platform-win32.cc Mon Nov 10 10:45:50 2014 UTC
@@ -344,14 +344,6 @@
DWORD elapsed = ticks_now - init_ticks;
this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
}
-
-
-int64_t FileTimeToInt64(FILETIME ft) {
- ULARGE_INTEGER result;
- result.LowPart = ft.dwLowDateTime;
- result.HighPart = ft.dwHighDateTime;
- return static_cast<int64_t>(result.QuadPart);
-}
// Return the local timezone offset in milliseconds east of UTC. This
@@ -360,12 +352,35 @@
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
- FILETIME local;
- SYSTEMTIME system_utc, system_local;
- FileTimeToSystemTime(&time_.ft_, &system_utc);
- SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
- SystemTimeToFileTime(&system_local, &local);
-  return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
+ cache->InitializeIfNeeded();
+
+ Win32Time rounded_to_second(*this);
+ rounded_to_second.t() =
+ rounded_to_second.t() / 1000 / kTimeScaler * 1000 * kTimeScaler;
+ // Convert to local time using POSIX localtime function.
+ // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+ // very slow. Other browsers use localtime().
+
+ // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+ // POSIX seconds past 1/1/1970 0:00:00.
+ double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+ if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+ return 0;
+ }
+ // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+ time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+ // Convert to local time, as struct with fields for day, hour, year, etc.
+ tm posix_local_time_struct;
+ if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+ if (posix_local_time_struct.tm_isdst > 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+ } else {
+ return cache->tzinfo_.Bias * -kMsPerMinute;
+ }
}
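As a hedged aside (not part of this changeset; the helper and numbers below are illustrative): Windows reports Bias, StandardBias and DaylightBias in minutes west of UTC, so the branches above reduce to negating their sum and scaling to milliseconds.

    #include <cstdint>

    // Illustrative mirror of the return statements in LocalOffset() above.
    int64_t LocalOffsetFromBiases(int64_t bias, int64_t active_bias) {
      const int64_t kMsPerMinute = 60000;
      return (bias + active_bias) * -kMsPerMinute;  // minutes west -> ms east
    }

    // Example: CET outside DST has Bias = -60 and StandardBias = 0, giving
    // +3600000 ms (UTC+1); with DaylightBias = -60 the DST branch yields
    // +7200000 ms.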
=======================================
--- /trunk/src/bootstrapper.cc Wed Nov 5 01:04:48 2014 UTC
+++ /trunk/src/bootstrapper.cc Mon Nov 10 10:45:50 2014 UTC
@@ -361,7 +361,7 @@
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
- Handle<Map> new_map = Map::Copy(old_map);
+ Handle<Map> new_map = Map::Copy(old_map, "SetObjectPrototype");
new_map->set_prototype(*proto);
JSObject::MigrateToMap(object, new_map);
}
@@ -510,7 +510,8 @@
Handle<JSObject> prototype = factory->NewJSObject(
isolate->object_function(),
TENURED);
- Handle<Map> map = Map::Copy(handle(prototype->map()));
+ Handle<Map> map =
+ Map::Copy(handle(prototype->map()), "EmptyObjectPrototype");
map->set_is_prototype_map(true);
prototype->set_map(*map);
@@ -908,6 +909,10 @@
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
+ Handle<GlobalContextTable> global_context_table =
+ factory->NewGlobalContextTable();
+ native_context()->set_global_context_table(*global_context_table);
+
Handle<String> object_name = factory->Object_string();
JSObject::AddProperty(
global_object, object_name, isolate->object_function(), DONT_ENUM);
@@ -1089,7 +1094,7 @@
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = Map::Copy(initial_map);
+ Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
@@ -1244,7 +1249,8 @@
}
{ // --- aliased arguments map
- Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map());
+ Handle<Map> map =
+ Map::Copy(isolate->sloppy_arguments_map(), "AliasedArguments");
map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
DCHECK_EQ(2, map->pre_allocated_property_fields());
native_context()->set_aliased_arguments_map(*map);
@@ -1657,7 +1663,7 @@
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
- Handle<Map> initial_map = Map::Copy(original_map);
+ Handle<Map> initial_map = Map::Copy(original_map, "InternalArray");
initial_map->set_elements_kind(elements_kind);
JSFunction::SetInitialMap(array_function, initial_map, prototype);
@@ -1935,7 +1941,7 @@
    // Create maps for generator functions and their prototypes. Store those
// maps in the native context.
Handle<Map> generator_function_map =
- Map::Copy(sloppy_function_map_writable_prototype_);
+        Map::Copy(sloppy_function_map_writable_prototype_, "GeneratorFunction");
generator_function_map->set_prototype(*generator_function_prototype);
native_context()->set_sloppy_generator_function_map(
*generator_function_map);
@@ -1966,7 +1972,8 @@
rw_attribs, poison_pair);
    Handle<Map> strict_function_map(native_context()->strict_function_map());
-    Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+ Handle<Map> strict_generator_function_map =
+ Map::Copy(strict_function_map, "StrictGeneratorFunction");
// "arguments" and "caller" already poisoned.
strict_generator_function_map->set_prototype(*generator_function_prototype);
native_context()->set_strict_generator_function_map(
@@ -2712,6 +2719,15 @@
AddToWeakNativeContextList(*native_context());
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
+#if TRACE_MAPS
+ if (FLAG_trace_maps) {
+ Handle<JSFunction> object_fun = isolate->object_function();
+ PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
+ reinterpret_cast<void*>(object_fun->initial_map()),
+ object_fun->shared()->unique_id());
+ Map::TraceAllTransitions(object_fun->initial_map());
+ }
+#endif
Handle<GlobalObject> global_object;
Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
global_proxy_template, maybe_global_proxy, &global_object);
=======================================
--- /trunk/src/code-stubs.h Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/code-stubs.h Mon Nov 10 10:45:50 2014 UTC
@@ -577,10 +577,14 @@
bool is_arrow() const { return IsArrowFunction(kind()); }
bool is_generator() const { return IsGeneratorFunction(kind()); }
bool is_concise_method() const { return IsConciseMethod(kind()); }
+  bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
+ bool is_default_constructor_call_super() const {
+ return IsDefaultConstructorCallSuper(kind());
+ }
private:
class StrictModeBits : public BitField<StrictMode, 0, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
+ class FunctionKindBits : public BitField<FunctionKind, 1, 5> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -299,6 +299,42 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmSxtb:
+ __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmSxth:
+ __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmSxtab:
+ __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmSxtah:
+ __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmUxtb:
+ __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmUxth:
+ __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmUxtab:
+ __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmUxtah:
+ __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
=======================================
--- /trunk/src/compiler/arm/instruction-codes-arm.h Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/compiler/arm/instruction-codes-arm.h Mon Nov 10 10:45:50 2014 UTC
@@ -35,6 +35,14 @@
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmUxtah) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Mon Nov 10 10:45:50 2014 UTC
@@ -91,6 +91,14 @@
case kArmUdiv:
case kArmBfc:
case kArmUbfx:
+ case kArmSxtb:
+ case kArmSxth:
+ case kArmSxtab:
+ case kArmSxtah:
+ case kArmUxtb:
+ case kArmUxth:
+ case kArmUxtab:
+ case kArmUxtah:
case kArmVcmpF64:
case kArmVaddF64:
case kArmVsubF64:
@@ -255,8 +263,20 @@
InstructionOperand* outputs[2];
size_t output_count = 0;
- if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
- &input_count, &inputs[1])) {
+ if (m.left().node() == m.right().node()) {
+    // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // mov r0, r1, asr #16
+ // adds r0, r0, r1, asr #16
+ // bvs label
+ InstructionOperand* const input = g.UseRegister(m.left().node());
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+ &input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
} else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
@@ -430,12 +450,12 @@
return;
}
}
- if (IsSupported(ARMv7) && m.right().HasValue()) {
- // Try to interpret this AND as UBFX.
+ if (m.right().HasValue()) {
uint32_t const value = m.right().Value();
uint32_t width = base::bits::CountPopulation32(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
- if (width != 0 && msb + width == 32) {
+ // Try to interpret this AND as UBFX.
+ if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
@@ -450,7 +470,6 @@
g.TempImmediate(0), g.TempImmediate(width));
return;
}
-
// Try to interpret this AND as BIC.
if (g.CanBeImmediate(~value)) {
Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
@@ -458,15 +477,22 @@
g.TempImmediate(~value));
return;
}
-
+ // Try to interpret this AND as UXTH.
+ if (value == 0xffff) {
+ Emit(kArmUxth, g.DefineAsRegister(m.node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
+ }
// Try to interpret this AND as BFC.
- width = 32 - width;
- msb = base::bits::CountLeadingZeros32(~value);
- uint32_t lsb = base::bits::CountTrailingZeros32(~value);
- if (msb + width + lsb == 32) {
-      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
- g.TempImmediate(lsb), g.TempImmediate(width));
- return;
+ if (IsSupported(ARMv7)) {
+ width = 32 - width;
+ msb = base::bits::CountLeadingZeros32(~value);
+ uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+ if (msb + width + lsb == 32) {
+        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(lsb), g.TempImmediate(width));
+ return;
+ }
}
}
VisitBinop(this, node, kArmAnd, kArmAnd);
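As a hedged illustration (the helper below is invented, not from the patch), the new 0xffff case means a plain 16-bit mask is selected as uxth without needing the ARMv7-only UBFX/BFC paths:

    // x & 0xffff is now emitted as a single "uxth" on ARM.
    unsigned LowHalf(unsigned x) { return x & 0xffff; }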
@@ -571,6 +597,20 @@
void InstructionSelector::VisitWord32Sar(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kArmSxth, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kArmSxtb, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
VisitShift(this, node, TryMatchASR);
}
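A hedged example of the source pattern the new Word32Sar matching targets (the function is illustrative): a shift-left/shift-right pair used for sign extension collapses into one extend instruction.

    #include <cstdint>

    int32_t SignExtend16(uint32_t x) {
      // Graph form: (x << 16) >> 16; now selected as a single "sxth".
      // The 24/24 variant is selected as "sxtb".
      return static_cast<int32_t>(x << 16) >> 16;
    }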
@@ -583,31 +623,113 @@
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
- Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
- }
- if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
- Int32BinopMatcher mright(m.right().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
- }
- if (m.left().IsInt32MulHigh() && CanCover(node, m.left().node())) {
- Int32BinopMatcher mleft(m.left().node());
- Emit(kArmSmmla, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
+ if (CanCover(node, m.left().node())) {
+ switch (m.left().opcode()) {
+ case IrOpcode::kInt32Mul: {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmMla, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ case IrOpcode::kInt32MulHigh: {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmSmmla, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(0xff)) {
+ Emit(kArmUxtab, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if (mleft.right().Is(0xffff)) {
+ Emit(kArmUxtah, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ case IrOpcode::kWord32Sar: {
+ Int32BinopMatcher mleft(m.left().node());
+ if (CanCover(mleft.node(), mleft.left().node()) &&
+ mleft.left().IsWord32Shl()) {
+ Int32BinopMatcher mleftleft(mleft.left().node());
+ if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
+ Emit(kArmSxtab, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
+ Emit(kArmSxtah, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ default:
+ break;
+ }
}
- if (m.right().IsInt32MulHigh() && CanCover(node, m.right().node())) {
- Int32BinopMatcher mright(m.right().node());
- Emit(kArmSmmla, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+ if (CanCover(node, m.right().node())) {
+ switch (m.right().opcode()) {
+ case IrOpcode::kInt32Mul: {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmMla, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ case IrOpcode::kInt32MulHigh: {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmSmmla, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().Is(0xff)) {
+ Emit(kArmUxtab, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(0));
+ return;
+ } else if (mright.right().Is(0xffff)) {
+ Emit(kArmUxtah, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ case IrOpcode::kWord32Sar: {
+ Int32BinopMatcher mright(m.right().node());
+ if (CanCover(mright.node(), mright.left().node()) &&
+ mright.left().IsWord32Shl()) {
+ Int32BinopMatcher mrightleft(mright.left().node());
+ if (mright.right().Is(24) && mrightleft.right().Is(24)) {
+ Emit(kArmSxtab, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
+ Emit(kArmSxtah, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ default:
+ break;
+ }
}
VisitBinop(this, node, kArmAdd, kArmAdd);
}
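For a hedged picture of what the reworked VisitInt32Add covers (identifiers invented): an add whose operand is a byte/halfword mask or a shift-pair sign extension now folds into the extend-and-add forms.

    #include <cstdint>

    uint32_t AddLowByte(uint32_t acc, uint32_t x) {
      return acc + (x & 0xff);  // -> "uxtab"; acc + (x & 0xffff) -> "uxtah"
    }
    // The (x << 24) >> 24 and (x << 16) >> 16 operands similarly select
    // "sxtab" and "sxtah".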
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -542,6 +542,17 @@
void InstructionSelector::VisitWord64Shl(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kArm64Lsl, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
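A hedged illustration of why the extension is dead here (helper invented): with a shift amount of 32..63, only the low 32 bits of the input can reach the result, so the ChangeInt32ToInt64/ChangeUint32ToUint64 adds nothing.

    #include <cstdint>

    uint64_t ShiftIntoHigh(int32_t x) {
      // Sign-extending x first cannot affect bits that survive a shift by 32,
      // so the selector can feed the 32-bit value straight into a 64-bit lsl.
      return static_cast<uint64_t>(static_cast<int64_t>(x)) << 32;
    }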
@@ -884,6 +895,18 @@
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ Int64BinopMatcher m(value);
+ if ((m.IsWord64Sar() && m.right().HasValue() &&
+ (m.right().Value() == 32)) ||
+ (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
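Similarly, a hedged example for the truncation case (helper invented): a 64-bit right shift by 32..63 followed by truncation needs only a single 64-bit lsr rather than a shift plus a separate 32-bit move.

    #include <cstdint>

    uint32_t HighWord(uint64_t x) {
      return static_cast<uint32_t>(x >> 32);  // one "lsr x0, x1, #32"
    }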
=======================================
--- /trunk/src/compiler/code-generator.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/compiler/code-generator.cc Mon Nov 10 10:45:50 2014 UTC
@@ -422,7 +422,8 @@
if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
type == kMachInt16) {
translation->StoreInt32StackSlot(op->index());
- } else if (type == kMachUint32) {
+ } else if (type == kMachUint32 || type == kMachUint16 ||
+ type == kMachUint8) {
translation->StoreUint32StackSlot(op->index());
} else if ((type & kRepMask) == kRepTagged) {
translation->StoreStackSlot(op->index());
@@ -437,7 +438,8 @@
if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
type == kMachInt16) {
translation->StoreInt32Register(converter.ToRegister(op));
- } else if (type == kMachUint32) {
+ } else if (type == kMachUint32 || type == kMachUint16 ||
+ type == kMachUint8) {
translation->StoreUint32Register(converter.ToRegister(op));
} else if ((type & kRepMask) == kRepTagged) {
translation->StoreRegister(converter.ToRegister(op));
=======================================
--- /trunk/src/compiler/js-generic-lowering.cc Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/compiler/js-generic-lowering.cc Mon Nov 10 10:45:50 2014 UTC
@@ -223,7 +223,7 @@
linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
Node* ref = ExternalConstant(ExternalReference(f, isolate()));
Node* arity = Int32Constant(nargs);
- PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant());
+  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
PatchInsertInput(node, nargs + 1, ref);
PatchInsertInput(node, nargs + 2, arity);
PatchOperator(node, common()->Call(desc));
=======================================
--- /trunk/src/compiler/js-graph.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/js-graph.cc Mon Nov 10 10:45:50 2014 UTC
@@ -17,12 +17,16 @@
}
-Node* JSGraph::CEntryStubConstant() {
- if (!c_entry_stub_constant_.is_set()) {
- c_entry_stub_constant_.set(
- ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+Node* JSGraph::CEntryStubConstant(int result_size) {
+ if (result_size == 1) {
+ if (!c_entry_stub_constant_.is_set()) {
+ c_entry_stub_constant_.set(
+ ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+ }
+ return c_entry_stub_constant_.get();
}
- return c_entry_stub_constant_.get();
+
+  return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
}
=======================================
--- /trunk/src/compiler/js-graph.h Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/js-graph.h Mon Nov 10 10:45:50 2014 UTC
@@ -32,7 +32,7 @@
cache_(zone()) {}
// Canonicalized global constants.
- Node* CEntryStubConstant();
+ Node* CEntryStubConstant(int result_size);
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
=======================================
--- /trunk/src/compiler/linkage.cc Tue Nov 4 01:04:58 2014 UTC
+++ /trunk/src/compiler/linkage.cc Mon Nov 10 10:45:50 2014 UTC
@@ -186,6 +186,7 @@
case Runtime::kRegExpCompile:
case Runtime::kRegExpExecMultiple:
case Runtime::kResolvePossiblyDirectEval:
+ case Runtime::kRunMicrotasks:
case Runtime::kSetPrototype:
case Runtime::kSetScriptBreakPoint:
case Runtime::kSparseJoinWithSeparator:
=======================================
--- /trunk/src/compiler/node-matchers.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/node-matchers.h Mon Nov 10 10:45:50 2014 UTC
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_NODE_MATCHERS_H_
#define V8_COMPILER_NODE_MATCHERS_H_
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/unique.h"
@@ -116,7 +118,7 @@
// right hand sides of a binary operation and can put constants on the right
// if they appear on the left hand side of a commutative operation.
template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
explicit BinopMatcher(Node* node)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
@@ -127,13 +129,18 @@
  bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
bool LeftEqualsRight() const { return left().node() == right().node(); }
+
+ protected:
+ void SwapInputs() {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left().node());
+ node()->ReplaceInput(1, right().node());
+ }
private:
void PutConstantOnRight() {
if (left().HasValue() && !right().HasValue()) {
- std::swap(left_, right_);
- node()->ReplaceInput(0, left().node());
- node()->ReplaceInput(1, right().node());
+ SwapInputs();
}
}
@@ -150,6 +157,189 @@
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
+struct Int32AddMatcher : public Int32BinopMatcher {
+ explicit Int32AddMatcher(Node* node)
+ : Int32BinopMatcher(node), scale_exponent_(-1) {
+ PutScaledInputOnLeft();
+ }
+
+ bool HasScaledInput() const { return scale_exponent_ != -1; }
+ Node* ScaledInput() const {
+ DCHECK(HasScaledInput());
+ return left().node()->InputAt(0);
+ }
+ int ScaleExponent() const {
+ DCHECK(HasScaledInput());
+ return scale_exponent_;
+ }
+
+ private:
+ int GetInputScaleExponent(Node* node) const {
+ if (node->opcode() == IrOpcode::kWord32Shl) {
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ int32_t value = m.right().Value();
+ if (value >= 0 && value <= 3) {
+ return value;
+ }
+ }
+ } else if (node->opcode() == IrOpcode::kInt32Mul) {
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ int32_t value = m.right().Value();
+ if (value == 1) {
+ return 0;
+ } else if (value == 2) {
+ return 1;
+ } else if (value == 4) {
+ return 2;
+ } else if (value == 8) {
+ return 3;
+ }
+ }
+ }
+ return -1;
+ }
+
+ void PutScaledInputOnLeft() {
+ scale_exponent_ = GetInputScaleExponent(right().node());
+ if (scale_exponent_ >= 0) {
+ int left_scale_exponent = GetInputScaleExponent(left().node());
+ if (left_scale_exponent == -1) {
+ SwapInputs();
+ } else {
+ scale_exponent_ = left_scale_exponent;
+ }
+ } else {
+ scale_exponent_ = GetInputScaleExponent(left().node());
+ if (scale_exponent_ == -1) {
+ if (right().opcode() == IrOpcode::kInt32Add &&
+ left().opcode() != IrOpcode::kInt32Add) {
+ SwapInputs();
+ }
+ }
+ }
+ }
+
+ int scale_exponent_;
+};
+
+struct ScaledWithOffsetMatcher {
+ explicit ScaledWithOffsetMatcher(Node* node)
+ : matches_(false),
+ scaled_(NULL),
+ scale_exponent_(0),
+ offset_(NULL),
+ constant_(NULL) {
+ if (node->opcode() != IrOpcode::kInt32Add) return;
+
+ // The Int32AddMatcher canonicalizes the order of constants and scale
+    // factors that are used as inputs, so instead of enumerating all possible
+    // patterns by brute force, checking for node clusters using the following
+    // templates in the following order suffices to find all of the interesting
+    // cases (S = scaled input, O = offset input, C = constant input):
+ // (S + (O + C))
+ // (S + (O + O))
+ // (S + C)
+ // (S + O)
+ // ((S + C) + O)
+ // ((S + O) + C)
+ // ((O + C) + O)
+ // ((O + O) + C)
+ // (O + C)
+ // (O + O)
+ Int32AddMatcher base_matcher(node);
+ Node* left = base_matcher.left().node();
+ Node* right = base_matcher.right().node();
+ if (base_matcher.HasScaledInput() && left->OwnedBy(node)) {
+ scaled_ = base_matcher.ScaledInput();
+ scale_exponent_ = base_matcher.ScaleExponent();
+ if (right->opcode() == IrOpcode::kInt32Add && right->OwnedBy(node)) {
+ Int32AddMatcher right_matcher(right);
+ if (right_matcher.right().HasValue()) {
+ // (S + (O + C))
+ offset_ = right_matcher.left().node();
+ constant_ = right_matcher.right().node();
+ } else {
+ // (S + (O + O))
+ offset_ = right;
+ }
+ } else if (base_matcher.right().HasValue()) {
+ // (S + C)
+ constant_ = right;
+ } else {
+ // (S + O)
+ offset_ = right;
+ }
+ } else {
+ if (left->opcode() == IrOpcode::kInt32Add && left->OwnedBy(node)) {
+ Int32AddMatcher left_matcher(left);
+ Node* left_left = left_matcher.left().node();
+ Node* left_right = left_matcher.right().node();
+ if (left_matcher.HasScaledInput() && left_left->OwnedBy(left)) {
+ scaled_ = left_matcher.ScaledInput();
+ scale_exponent_ = left_matcher.ScaleExponent();
+ if (left_matcher.right().HasValue()) {
+ // ((S + C) + O)
+ constant_ = left_right;
+ offset_ = right;
+ } else if (base_matcher.right().HasValue()) {
+ // ((S + O) + C)
+ offset_ = left_right;
+ constant_ = right;
+ } else {
+ // (O + O)
+ scaled_ = left;
+ offset_ = right;
+ }
+ } else {
+ if (left_matcher.right().HasValue()) {
+ // ((O + C) + O)
+ scaled_ = left_left;
+ constant_ = left_right;
+ offset_ = right;
+ } else if (base_matcher.right().HasValue()) {
+ // ((O + O) + C)
+ scaled_ = left_left;
+ offset_ = left_right;
+ constant_ = right;
+ } else {
+ // (O + O)
+ scaled_ = left;
+ offset_ = right;
+ }
+ }
+ } else {
+ if (base_matcher.right().HasValue()) {
+ // (O + C)
+ offset_ = left;
+ constant_ = right;
+ } else {
+ // (O + O)
+ offset_ = left;
+ scaled_ = right;
+ }
+ }
+ }
+ matches_ = true;
+ }
+
+ bool matches() const { return matches_; }
+ Node* scaled() const { return scaled_; }
+ int scale_exponent() const { return scale_exponent_; }
+ Node* offset() const { return offset_; }
+ Node* constant() const { return constant_; }
+
+ private:
+ bool matches_;
+
+ protected:
+ Node* scaled_;
+ int scale_exponent_;
+ Node* offset_;
+ Node* constant_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
=======================================
--- /trunk/src/compiler/schedule.h Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/compiler/schedule.h Mon Nov 10 10:45:50 2014 UTC
@@ -265,8 +265,6 @@
private:
friend class Scheduler;
- friend class CodeGenerator;
- friend class ScheduleVisualizer;
friend class BasicBlockInstrumentor;
void AddSuccessor(BasicBlock* block, BasicBlock* succ);
=======================================
--- /trunk/src/compiler/scheduler.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/compiler/scheduler.cc Mon Nov 10 10:45:50 2014 UTC
@@ -685,7 +685,7 @@
stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
previous_block_count_ = schedule_->BasicBlockCount();
int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
- int num_loops = 0;
+ int num_loops = static_cast<int>(loops_.size());
while (stack_depth > 0) {
int current = stack_depth - 1;
@@ -717,7 +717,7 @@
}
// If no loops were encountered, then the order we computed was
correct.
- if (num_loops != 0) {
+ if (num_loops > static_cast<int>(loops_.size())) {
// Otherwise, compute the loop information from the backedges in
order
// to perform a traversal that groups loop bodies together.
ComputeLoopInfo(stack_, num_loops, &backedges_);
@@ -725,7 +725,7 @@
// Initialize the "loop stack". Note the entry could be a loop
header.
LoopInfo* loop =
HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
- order = NULL;
+ order = insert_after;
// Perform an iterative post-order traversal, visiting loop bodies
before
// edges that lead out of loops. Visits each block once, but linking
loop
@@ -737,7 +737,7 @@
BasicBlock* block = frame->block;
BasicBlock* succ = NULL;
- if (frame->index < block->SuccessorCount()) {
+ if (block != end && frame->index < block->SuccessorCount()) {
// Process the next normal successor.
succ = block->SuccessorAt(frame->index++);
} else if (HasLoopNumber(block)) {
=======================================
--- /trunk/src/compiler/x64/instruction-selector-x64.cc Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/compiler/x64/instruction-selector-x64.cc Mon Nov 10 10:45:50 2014 UTC
@@ -364,8 +364,79 @@
VisitWord64Shift(this, node, kX64Ror);
}
+namespace {
+
+AddressingMode GenerateMemoryOperandInputs(X64OperandGenerator* g, Node* scaled,
+                                           int scale_exponent, Node* offset,
+ Node* constant,
+ InstructionOperand* inputs[],
+ size_t* input_count) {
+ AddressingMode mode = kMode_MRI;
+ if (offset != NULL) {
+ inputs[(*input_count)++] = g->UseRegister(offset);
+ if (scaled != NULL) {
+ DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+ inputs[(*input_count)++] = g->UseRegister(scaled);
+ if (constant != NULL) {
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+        static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                     kMode_MR4I, kMode_MR8I};
+ mode = kMRnI_modes[scale_exponent];
+ } else {
+ static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+ kMode_MR4, kMode_MR8};
+ mode = kMRn_modes[scale_exponent];
+ }
+ } else {
+ DCHECK(constant != NULL);
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+ mode = kMode_MRI;
+ }
+ } else {
+ DCHECK(scaled != NULL);
+ DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+ inputs[(*input_count)++] = g->UseRegister(scaled);
+ if (constant != NULL) {
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+ static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale_exponent];
+ } else {
+      static const AddressingMode kMn_modes[] = {kMode_M1, kMode_M2, kMode_M4,
+ kMode_M8};
+ mode = kMn_modes[scale_exponent];
+ }
+ }
+ return mode;
+}
+
+} // namespace
+
void InstructionSelector::VisitInt32Add(Node* node) {
+ // Try to match the Add to a leal pattern
+ ScaledWithOffsetMatcher m(node);
+ X64OperandGenerator g(this);
+  if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) {
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+
+ AddressingMode mode = GenerateMemoryOperandInputs(
+        &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs,
+ &input_count);
+
+ DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand* outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32;
+
+ Emit(opcode, 1, outputs, input_count, inputs);
+ return;
+ }
+
VisitBinop(this, node, kX64Add32);
}
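A hedged example of an add the new path can fold (function name invented): a base-plus-scaled-index-plus-constant expression matches one of the (S, O, C) shapes enumerated in ScaledWithOffsetMatcher and is emitted as a single lea.

    #include <cstdint>

    int32_t Combine(int32_t base, int32_t index) {
      // Selected as one 32-bit lea with scale 4 and displacement 8 (kMode_MR4I)
      // instead of separate multiply and add instructions.
      return base + index * 4 + 8;
    }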
=======================================
--- /trunk/src/contexts.cc Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/contexts.cc Mon Nov 10 10:45:50 2014 UTC
@@ -11,6 +11,48 @@
namespace v8 {
namespace internal {
+
+Handle<GlobalContextTable> GlobalContextTable::Extend(
+ Handle<GlobalContextTable> table, Handle<Context> global_context) {
+ Handle<GlobalContextTable> result;
+ int used = table->used();
+ int length = table->length();
+ CHECK(used >= 0 && length > 0 && used < length);
+ if (used + 1 == length) {
+ CHECK(length < Smi::kMaxValue / 2);
+ result = Handle<GlobalContextTable>::cast(
+ FixedArray::CopySize(table, length * 2));
+ } else {
+ result = table;
+ }
+ result->set_used(used + 1);
+
+ DCHECK(global_context->IsGlobalContext());
+ result->set(used + 1, *global_context);
+ return result;
+}
+
+
+bool GlobalContextTable::Lookup(Handle<GlobalContextTable> table,
+                                Handle<String> name, LookupResult* result) {
+ for (int i = 0; i < table->used(); i++) {
+ Handle<Context> context = GetContext(table, i);
+ DCHECK(context->IsGlobalContext());
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+ int slot_index = ScopeInfo::ContextSlotIndex(
+ scope_info, name, &result->mode, &result->init_flag,
+ &result->maybe_assigned_flag);
+
+ if (slot_index >= 0) {
+ result->context_index = i;
+ result->slot_index = slot_index;
+ return true;
+ }
+ }
+ return false;
+}
+
+
Context* Context::declaration_context() {
Context* current = this;
while (!current->IsFunctionContext() && !current->IsNativeContext()) {
@@ -101,6 +143,53 @@
if (blacklist.value) return maybe(ABSENT);
return attrs;
}
+
+static void GetAttributesAndBindingFlags(VariableMode mode,
+ InitializationFlag init_flag,
+ PropertyAttributes* attributes,
+ BindingFlags* binding_flags) {
+ switch (mode) {
+ case INTERNAL: // Fall through.
+ case VAR:
+ *attributes = NONE;
+ *binding_flags = MUTABLE_IS_INITIALIZED;
+ break;
+ case LET:
+ *attributes = NONE;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? MUTABLE_CHECK_INITIALIZED
+ : MUTABLE_IS_INITIALIZED;
+ break;
+ case CONST_LEGACY:
+ *attributes = READ_ONLY;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? IMMUTABLE_CHECK_INITIALIZED
+ : IMMUTABLE_IS_INITIALIZED;
+ break;
+ case CONST:
+ *attributes = READ_ONLY;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? IMMUTABLE_CHECK_INITIALIZED_HARMONY
+ : IMMUTABLE_IS_INITIALIZED_HARMONY;
+ break;
+ case MODULE:
+ *attributes = READ_ONLY;
+ *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+ break;
+ case DYNAMIC:
+ case DYNAMIC_GLOBAL:
+ case DYNAMIC_LOCAL:
+ case TEMPORARY:
+      // Note: Fixed context slots are statically allocated by the compiler.
+      // Statically allocated variables always have a statically known mode,
+ // which is the mode with which they were declared when added to the
+ // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
+ // declared variables that were introduced through declaration nodes)
+ // must not appear here.
+ UNREACHABLE();
+ break;
+ }
+}
Handle<Object> Context::Lookup(Handle<String> name,
@@ -121,8 +210,6 @@
name->ShortPrint();
PrintF(")\n");
}
-
- bool visited_global_context = false;
do {
if (FLAG_trace_contexts) {
@@ -132,19 +219,6 @@
PrintF("\n");
}
- if (follow_context_chain && FLAG_harmony_scoping &&
- !visited_global_context &&
- (context->IsGlobalContext() || context->IsNativeContext())) {
- // For lexical scoping, on a top level, we might resolve to the
- // lexical bindings introduced by later scrips. Therefore we need to
- // switch to the the last added global context during lookup here.
-      context = Handle<Context>(context->global_object()->global_context());
- visited_global_context = true;
- if (FLAG_trace_contexts) {
- PrintF(" - switching to current global context %p\n",
- reinterpret_cast<void*>(*context));
- }
- }
// 1. Check global objects, subjects of with, and extension objects.
if (context->IsNativeContext() ||
@@ -152,6 +226,30 @@
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSReceiver> object(
JSReceiver::cast(context->extension()), isolate);
+
+ if (context->IsNativeContext()) {
+ if (FLAG_trace_contexts) {
+ PrintF(" - trying other global contexts\n");
+ }
+ // Try other global contexts.
+ Handle<GlobalContextTable> global_contexts(
+            context->global_object()->native_context()->global_context_table());
+ GlobalContextTable::LookupResult r;
+ if (GlobalContextTable::Lookup(global_contexts, name, &r)) {
+ if (FLAG_trace_contexts) {
+          Handle<Context> c = GlobalContextTable::GetContext(global_contexts,
+                                                             r.context_index);
+ PrintF("=> found property in global context %d: %p\n",
+ r.context_index, reinterpret_cast<void*>(*c));
+ }
+ *index = r.slot_index;
+ GetAttributesAndBindingFlags(r.mode, r.init_flag, attributes,
+ binding_flags);
+ return GlobalContextTable::GetContext(global_contexts,
+ r.context_index);
+ }
+ }
+
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
@@ -206,45 +304,8 @@
slot_index, mode);
}
*index = slot_index;
-        // Note: Fixed context slots are statically allocated by the compiler.
-        // Statically allocated variables always have a statically known mode,
-        // which is the mode with which they were declared when added to the
-        // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
-        // declared variables that were introduced through declaration nodes)
-        // must not appear here.
- switch (mode) {
- case INTERNAL: // Fall through.
- case VAR:
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- break;
- case LET:
- *attributes = NONE;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
- break;
- case CONST_LEGACY:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
- break;
- case CONST:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
- IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
- case MODULE:
- *attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- case TEMPORARY:
- UNREACHABLE();
- break;
- }
+ GetAttributesAndBindingFlags(mode, init_flag, attributes,
+ binding_flags);
return context;
}
=======================================
--- /trunk/src/contexts.h Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/contexts.h Mon Nov 10 10:45:50 2014 UTC
@@ -183,7 +183,57 @@
  V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                          \
  V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)                         \
  V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol)                   \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)         \
+ V(GLOBAL_CONTEXT_TABLE_INDEX, GlobalContextTable, global_context_table)
+
+
+// A table of all global contexts. Every loaded top-level script with top-level
+// lexical declarations contributes its GlobalContext into this table.
+//
+// The table is a fixed array, its first slot is the current used count and
+// the subsequent slots 1..used contain GlobalContexts.
+class GlobalContextTable : public FixedArray {
+ public:
+ // Conversions.
+ static GlobalContextTable* cast(Object* context) {
+ DCHECK(context->IsGlobalContextTable());
+ return reinterpret_cast<GlobalContextTable*>(context);
+ }
+
+ struct LookupResult {
+ int context_index;
+ int slot_index;
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ };
+
+ int used() const { return Smi::cast(get(kUsedSlot))->value(); }
+
+ void set_used(int used) { set(kUsedSlot, Smi::FromInt(used)); }
+
+  static Handle<Context> GetContext(Handle<GlobalContextTable> table, int i) {
+ DCHECK(i < table->used());
+ return Handle<Context>::cast(FixedArray::get(table, i + 1));
+ }
+
+ // Lookup a variable `name` in a GlobalContextTable.
+ // If it returns true, the variable is found and `result` contains
+ // valid information about its location.
+ // If it returns false, `result` is untouched.
+ MUST_USE_RESULT
+ static bool Lookup(Handle<GlobalContextTable> table, Handle<String> name,
+ LookupResult* result);
+
+ MUST_USE_RESULT
+  static Handle<GlobalContextTable> Extend(Handle<GlobalContextTable> table,
+ Handle<Context> global_context);
+
+ private:
+ static const int kUsedSlot = 0;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalContextTable);
+};
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -229,6 +279,8 @@
//
// Finally, with Harmony scoping, the JSFunction representing a top level
// script will have the GlobalContext rather than a FunctionContext.
+// Global contexts from all top-level scripts are gathered in
+// GlobalContextTable.
class Context: public FixedArray {
public:
@@ -360,6 +412,7 @@
ITERATOR_SYMBOL_INDEX,
UNSCOPABLES_SYMBOL_INDEX,
ARRAY_VALUES_ITERATOR_INDEX,
+ GLOBAL_CONTEXT_TABLE_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
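A hedged usage sketch built only from the declarations above (error handling omitted; `isolate`, `name` and `global_context` are assumed to exist in the caller):

    Handle<GlobalContextTable> table = isolate->factory()->NewGlobalContextTable();
    table = GlobalContextTable::Extend(table, global_context);  // may reallocate

    GlobalContextTable::LookupResult r;
    if (GlobalContextTable::Lookup(table, name, &r)) {
      Handle<Context> found =
          GlobalContextTable::GetContext(table, r.context_index);
      // r.slot_index is the slot of `name` inside *found; slot 0 of the
      // underlying FixedArray holds used(), contexts live in slots 1..used.
    }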
=======================================
--- /trunk/src/factory.cc Wed Nov 5 01:04:48 2014 UTC
+++ /trunk/src/factory.cc Mon Nov 10 10:45:50 2014 UTC
@@ -706,6 +706,16 @@
DCHECK(context->IsGlobalContext());
return context;
}
+
+
+Handle<GlobalContextTable> Factory::NewGlobalContextTable() {
+ Handle<FixedArray> array = NewFixedArray(1);
+ array->set_map_no_write_barrier(*global_context_table_map());
+ Handle<GlobalContextTable> context_table =
+ Handle<GlobalContextTable>::cast(array);
+ context_table->set_used(0);
+ return context_table;
+}
Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
@@ -2077,6 +2087,9 @@
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
Handle<TypeFeedbackVector> feedback_vector = NewTypeFeedbackVector(0, 0);
share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
+#if TRACE_MAPS
+ share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+#endif
share->set_profiler_ticks(0);
share->set_ast_node_count(0);
share->set_counters(0);
=======================================
--- /trunk/src/factory.h Wed Nov 5 01:04:48 2014 UTC
+++ /trunk/src/factory.h Mon Nov 10 10:45:50 2014 UTC
@@ -229,6 +229,9 @@
Handle<Context> NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info);
+ // Create an empty global context table.
+ Handle<GlobalContextTable> NewGlobalContextTable();
+
// Create a module context.
Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
=======================================
--- /trunk/src/flag-definitions.h Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/flag-definitions.h Mon Nov 10 10:45:50 2014 UTC
@@ -438,11 +438,6 @@
DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS only)")
-// cpu-arm64.cc
-DEFINE_BOOL(enable_always_align_csp, true,
-            "enable alignment of csp to 16 bytes on platforms which prefer "
- "the register to always be aligned (ARM64 only)")
-
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
@@ -620,6 +615,9 @@
// objects.cc
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
+#if TRACE_MAPS
+DEFINE_BOOL(trace_maps, false, "trace map creation")
+#endif
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
=======================================
--- /trunk/src/full-codegen.cc Sat Nov 1 22:28:42 2014 UTC
+++ /trunk/src/full-codegen.cc Mon Nov 10 10:45:50 2014 UTC
@@ -1575,11 +1575,7 @@
__ Push(isolate()->factory()->the_hole_value());
}
- if (lit->constructor() != NULL) {
- VisitForStackValue(lit->constructor());
- } else {
- __ Push(isolate()->factory()->undefined_value());
- }
+ VisitForStackValue(lit->constructor());
__ Push(script());
__ Push(Smi::FromInt(lit->start_position()));
=======================================
--- /trunk/src/globals.h Fri Nov 7 01:04:41 2014 UTC
+++ /trunk/src/globals.h Mon Nov 10 10:45:50 2014 UTC
@@ -776,7 +776,9 @@
kArrowFunction = 1,
kGeneratorFunction = 2,
kConciseMethod = 4,
- kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod
+ kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
+ kDefaultConstructor = 8,
+ kDefaultConstructorCallSuper = 16
};
@@ -785,7 +787,9 @@
kind == FunctionKind::kArrowFunction ||
kind == FunctionKind::kGeneratorFunction ||
kind == FunctionKind::kConciseMethod ||
- kind == FunctionKind::kConciseGeneratorMethod;
+ kind == FunctionKind::kConciseGeneratorMethod ||
+ kind == FunctionKind::kDefaultConstructor ||
+ kind == FunctionKind::kDefaultConstructorCallSuper;
}
@@ -805,6 +809,18 @@
DCHECK(IsValidFunctionKind(kind));
return kind & FunctionKind::kConciseMethod;
}
+
+
+inline bool IsDefaultConstructor(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kDefaultConstructor;
+}
+
+
+inline bool IsDefaultConstructorCallSuper(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kDefaultConstructorCallSuper;
+}
} } // namespace v8::internal
namespace i = v8::internal;
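A hedged sanity check on the encoding (assuming FunctionKind stays an unscoped enum): the two new kinds are single-bit flags with values 8 and 16, which is why FunctionKindBits in ast.h and code-stubs.h grows from 3 to 5 bits.

    static_assert(FunctionKind::kDefaultConstructorCallSuper < (1 << 5),
                  "every FunctionKind value fits the widened 5-bit field");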
=======================================
--- /trunk/src/heap/gc-idle-time-handler.cc Wed Oct 22 07:27:53 2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler.cc Mon Nov 10 10:45:50 2014 UTC
@@ -14,6 +14,7 @@
const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
void GCIdleTimeAction::Print() {
@@ -129,14 +130,12 @@
// (2) If the new space is almost full and we can affort a Scavenge or if the
// next Scavenge will very likely take long, then a Scavenge is performed.
// (3) If there is currently no MarkCompact idle round going on, we start a
-// new idle round if enough garbage was created or we received a context
-// disposal event. Otherwise we do not perform garbage collection to keep
-// system utilization low.
+// new idle round if enough garbage was created. Otherwise we do not perform
+// garbage collection to keep system utilization low.
// (4) If incremental marking is done, we perform a full garbage collection
-// if context was disposed or if we are allowed to still do full garbage
-// collections during this idle round or if we are not allowed to start
-// incremental marking. Otherwise we do not perform garbage collection to
-// keep system utilization low.
+// if we are allowed to still do full garbage collections during this idle
+// round or if we are not allowed to start incremental marking. Otherwise we
+// do not perform garbage collection to keep system utilization low.
// (5) If sweeping is in progress and we received a large enough idle time
// request, we finalize sweeping here.
// (6) If incremental marking is in progress, we perform a marking step. Note,
@@ -145,8 +144,8 @@
HeapState heap_state) {
if (idle_time_in_ms == 0) {
if (heap_state.incremental_marking_stopped) {
- if (heap_state.size_of_objects < kSmallHeapSize &&
- heap_state.contexts_disposed > 0) {
+ if (heap_state.contexts_disposed > 0 &&
+ heap_state.contexts_disposal_rate < kHighContextDisposalRate) {
return GCIdleTimeAction::FullGC();
}
}
@@ -162,7 +161,7 @@
}
if (IsMarkCompactIdleRoundFinished()) {
-    if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+ if (EnoughGarbageSinceLastIdleRound()) {
StartIdleRound();
} else {
return GCIdleTimeAction::Done();
@@ -170,11 +169,8 @@
}
if (heap_state.incremental_marking_stopped) {
- // TODO(jochen): Remove context disposal dependant logic.
if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
-                            heap_state.mark_compact_speed_in_bytes_per_ms) ||
- (heap_state.size_of_objects < kSmallHeapSize &&
- heap_state.contexts_disposed > 0)) {
+                            heap_state.mark_compact_speed_in_bytes_per_ms)) {
      // If there are no more than two GCs left in this idle round and we are
      // allowed to do a full GC, then make those GCs full in order to compact
      // the code space.
@@ -182,10 +178,9 @@
      // can get rid of this special case and always start incremental marking.
int remaining_mark_sweeps =
          kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
- if (heap_state.contexts_disposed > 0 ||
- (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
- (remaining_mark_sweeps <= 2 ||
- !heap_state.can_start_incremental_marking))) {
+ if (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
+ (remaining_mark_sweeps <= 2 ||
+ !heap_state.can_start_incremental_marking)) {
return GCIdleTimeAction::FullGC();
}
}
=======================================
--- /trunk/src/heap/gc-idle-time-handler.h Tue Oct 14 07:51:07 2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler.h Mon Nov 10 10:45:50 2014 UTC
@@ -106,10 +106,6 @@
// Number of scavenges that will trigger start of new idle round.
static const int kIdleScavengeThreshold;
-  // Heap size threshold below which we prefer mark-compact over incremental
- // step.
- static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
-
// That is the maximum idle time we will have during frame rendering.
static const size_t kMaxFrameRenderingIdleTime = 16;
@@ -117,8 +113,12 @@
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+ // If contexts are disposed at a higher rate a full gc is triggered.
+ static const double kHighContextDisposalRate;
+
struct HeapState {
int contexts_disposed;
+ double contexts_disposal_rate;
size_t size_of_objects;
bool incremental_marking_stopped;
bool can_start_incremental_marking;
=======================================
--- /trunk/src/heap/gc-tracer.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/heap/gc-tracer.cc Mon Nov 10 10:45:50 2014 UTC
@@ -24,6 +24,11 @@
duration_ = duration;
allocation_in_bytes_ = allocation_in_bytes;
}
+
+
+GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
+ time_ = time;
+}
GCTracer::Event::Event(Type type, const char* gc_reason,
@@ -205,6 +210,11 @@
intptr_t allocation_in_bytes) {
  allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
}
+
+
+void GCTracer::AddContextDisposalTime(double time) {
+ context_disposal_events_.push_front(ContextDisposalEvent(time));
+}
void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
@@ -319,6 +329,7 @@
PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
NewSpaceAllocationThroughputInBytesPerMillisecond());
+  PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
if (current_.type == Event::SCAVENGER) {
PrintF("steps_count=%d ", current_.incremental_marking_steps);
@@ -476,5 +487,21 @@
return static_cast<intptr_t>(bytes / durations);
}
+
+
+double GCTracer::ContextDisposalRateInMilliseconds() const {
+ if (context_disposal_events_.size() == 0) return 0.0;
+
+ double begin = base::OS::TimeCurrentMillis();
+ double end = 0.0;
+ ContextDisposalEventBuffer::const_iterator iter =
+ context_disposal_events_.begin();
+ while (iter != context_disposal_events_.end()) {
+ end = iter->time_;
+ ++iter;
+ }
+
+ return (begin - end) / context_disposal_events_.size();
+}
}
} // namespace v8::internal
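A hedged worked example of the rate above (numbers invented): with three recorded disposals whose oldest timestamp is 700 ms and a current time of 1000 ms, the computation is

    double rate = (1000.0 - 700.0) / 3;  // 100 ms per disposal

which sits exactly at kHighContextDisposalRate; GCIdleTimeHandler::Compute() only turns a disposal into an immediate full GC when the rate is below that threshold, i.e. contexts are being thrown away faster than one per 100 ms.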
=======================================
--- /trunk/src/heap/gc-tracer.h Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/heap/gc-tracer.h Mon Nov 10 10:45:50 2014 UTC
@@ -145,6 +145,19 @@
intptr_t allocation_in_bytes_;
};
+
+ class ContextDisposalEvent {
+ public:
+ // Default constructor leaves the event uninitialized.
+ ContextDisposalEvent() {}
+
+ explicit ContextDisposalEvent(double time);
+
+ // Time when context disposal event happened.
+ double time_;
+ };
+
+
class Event {
public:
enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
@@ -241,6 +254,9 @@
typedef RingBuffer<AllocationEvent, kRingBufferMaxSize>
AllocationEventBuffer;
+ typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
+ ContextDisposalEventBuffer;
+
explicit GCTracer(Heap* heap);
// Start collecting data.
@@ -253,6 +269,8 @@
// Log an allocation throughput event.
  void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+ void AddContextDisposalTime(double time);
+
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
@@ -322,6 +340,12 @@
// Returns 0 if no events have been recorded.
intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+ // Computes the context disposal rate in milliseconds. It takes the time
+ // frame of the first recorded context disposal to the current time and
+ // divides it by the number of recorded events.
+ // Returns 0 if no events have been recorded.
+ double ContextDisposalRateInMilliseconds() const;
+
private:
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -359,6 +383,8 @@
// RingBuffer for allocation events.
AllocationEventBuffer allocation_events_;
+ ContextDisposalEventBuffer context_disposal_events_;
+
  // Cumulative number of incremental marking steps since creation of tracer.
int cumulative_incremental_marking_steps_;
=======================================
***Additional files exist in this changeset.***