Diff
Modified: trunk/JSTests/ChangeLog (226032 => 226033)
--- trunk/JSTests/ChangeLog 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/JSTests/ChangeLog 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1,3 +1,64 @@
+2017-12-17 Yusuke Suzuki <utatane....@gmail.com>
+
+ [FTL] NewArrayBuffer should be sinked if it is only used for spreading
+ https://bugs.webkit.org/show_bug.cgi?id=179762
+
+ Reviewed by Saam Barati.
+
+ * stress/call-varargs-double-new-array-buffer.js: Added.
+ (assert):
+ (bar):
+ (foo):
+ * stress/call-varargs-spread-new-array-buffer.js: Added.
+ (assert):
+ (bar):
+ (foo):
+ * stress/call-varargs-spread-new-array-buffer2.js: Added.
+ (assert):
+ (bar):
+ (foo):
+ * stress/forward-varargs-double-new-array-buffer.js: Added.
+ (assert):
+ (test.baz):
+ (test.bar):
+ (test.foo):
+ (test):
+ * stress/new-array-buffer-sinking-osrexit.js: Added.
+ (target):
+ (test):
+ * stress/new-array-with-spread-double-new-array-buffer.js: Added.
+ (shouldBe):
+ (test):
+ * stress/new-array-with-spread-with-phantom-new-array-buffer.js: Added.
+ (shouldBe):
+ (target):
+ (test):
+ * stress/phantom-new-array-buffer-forward-varargs.js: Added.
+ (assert):
+ (test1.bar):
+ (test1.foo):
+ (test1):
+ (test2.bar):
+ (test2.foo):
+ (test3.baz):
+ (test3.bar):
+ (test3.foo):
+ (test4.baz):
+ (test4.bar):
+ (test4.foo):
+ * stress/phantom-new-array-buffer-forward-varargs2.js: Added.
+ (assert):
+ (test.baz):
+ (test.bar):
+ (test.foo):
+ (test):
+ * stress/phantom-new-array-buffer-osr-exit.js: Added.
+ (assert):
+ (baz):
+ (bar):
+ (effects):
+ (foo):
+
2017-12-14 Saam Barati <sbar...@apple.com>
The CleanUp after LICM is erroneously removing a Check
Added: trunk/JSTests/stress/call-varargs-double-new-array-buffer.js (0 => 226033)
--- trunk/JSTests/stress/call-varargs-double-new-array-buffer.js (rev 0)
+++ trunk/JSTests/stress/call-varargs-double-new-array-buffer.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,33 @@
+function assert(b, m = "") {
+ if (!b)
+ throw new Error("Bad assert: " + m);
+}
+noInline(assert);
+
+function bar(...args) {
+ return args;
+}
+noInline(bar);
+
+function foo(a, ...args) {
+ let x = bar(...args, 42, ...[0.5, 1.5, 2.5, 3.5, 4.5], ...args);
+ return x;
+}
+noInline(foo);
+
+for (let i = 0; i < 10000; i++) {
+ let r = foo(i, i+1, i+2, i+3);
+ assert(r.length === 12);
+ assert(r[0] === i+1, JSON.stringify(r));
+ assert(r[1] === i+2, JSON.stringify(r));
+ assert(r[2] === i+3, JSON.stringify(r));
+ assert(r[3] === 42, JSON.stringify(r));
+ assert(r[4] === 0.5, JSON.stringify(r));
+ assert(r[5] === 1.5, JSON.stringify(r));
+ assert(r[6] === 2.5, JSON.stringify(r));
+ assert(r[7] === 3.5, JSON.stringify(r));
+ assert(r[8] === 4.5, JSON.stringify(r));
+ assert(r[9] === i+1, JSON.stringify(r));
+ assert(r[10] === i+2, JSON.stringify(r));
+ assert(r[11] === i+3, JSON.stringify(r));
+}
Added: trunk/JSTests/stress/call-varargs-spread-new-array-buffer.js (0 => 226033)
--- trunk/JSTests/stress/call-varargs-spread-new-array-buffer.js (rev 0)
+++ trunk/JSTests/stress/call-varargs-spread-new-array-buffer.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,29 @@
+function assert(b, m = "") {
+ if (!b)
+ throw new Error("Bad assert: " + m);
+}
+noInline(assert);
+
+function bar(...args) {
+ return args;
+}
+noInline(bar);
+
+function foo() {
+ let args = [1, 2, 3];
+ let x = bar(...args, 42, ...args);
+ return x;
+}
+noInline(foo);
+
+for (let i = 0; i < 10000; i++) {
+ let r = foo();
+ assert(r.length === 7);
+ assert(r[0] === 1, JSON.stringify(r));
+ assert(r[1] === 2, JSON.stringify(r));
+ assert(r[2] === 3, JSON.stringify(r));
+ assert(r[3] === 42, JSON.stringify(r));
+ assert(r[4] === 1, JSON.stringify(r));
+ assert(r[5] === 2, JSON.stringify(r));
+ assert(r[6] === 3, JSON.stringify(r));
+}
Added: trunk/JSTests/stress/call-varargs-spread-new-array-buffer2.js (0 => 226033)
--- trunk/JSTests/stress/call-varargs-spread-new-array-buffer2.js (rev 0)
+++ trunk/JSTests/stress/call-varargs-spread-new-array-buffer2.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,33 @@
+function assert(b, m = "") {
+ if (!b)
+ throw new Error("Bad assert: " + m);
+}
+noInline(assert);
+
+function bar(...args) {
+ return args;
+}
+noInline(bar);
+
+function foo(a, ...args) {
+ let x = bar(...args, 42, ...[0, 1, 2, 3, 4], ...args);
+ return x;
+}
+noInline(foo);
+
+for (let i = 0; i < 10000; i++) {
+ let r = foo(i, i+1, i+2, i+3);
+ assert(r.length === 12);
+ assert(r[0] === i+1, JSON.stringify(r));
+ assert(r[1] === i+2, JSON.stringify(r));
+ assert(r[2] === i+3, JSON.stringify(r));
+ assert(r[3] === 42, JSON.stringify(r));
+ assert(r[4] === 0, JSON.stringify(r));
+ assert(r[5] === 1, JSON.stringify(r));
+ assert(r[6] === 2, JSON.stringify(r));
+ assert(r[7] === 3, JSON.stringify(r));
+ assert(r[8] === 4, JSON.stringify(r));
+ assert(r[9] === i+1, JSON.stringify(r));
+ assert(r[10] === i+2, JSON.stringify(r));
+ assert(r[11] === i+3, JSON.stringify(r));
+}
Added: trunk/JSTests/stress/forward-varargs-double-new-array-buffer.js (0 => 226033)
--- trunk/JSTests/stress/forward-varargs-double-new-array-buffer.js (rev 0)
+++ trunk/JSTests/stress/forward-varargs-double-new-array-buffer.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,33 @@
+"use strict";
+
+function assert(b, m="") {
+ if (!b)
+ throw new Error("Bad assertion: " + m);
+}
+noInline(assert);
+
+function test() {
+ function baz(...args) {
+ return args;
+ }
+ function bar(...args) {
+ return baz(...args);
+ }
+ function foo(a, b, c, ...args) {
+ return bar(...args, a, ...[0.5, 1.5, 2.5]);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 100000; i++) {
+ let r = foo(i, i+1, i+2, i+3);
+ assert(r.length === 5);
+ let [a, b, c, d, e] = r;
+ assert(a === i+3);
+ assert(b === i);
+ assert(c === 0.5);
+ assert(d === 1.5);
+ assert(e === 2.5);
+ }
+}
+
+test();
Added: trunk/JSTests/stress/new-array-buffer-sinking-osrexit.js (0 => 226033)
--- trunk/JSTests/stress/new-array-buffer-sinking-osrexit.js (rev 0)
+++ trunk/JSTests/stress/new-array-buffer-sinking-osrexit.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,17 @@
+function target(p, a0, a1, a2, a3, a4, a5)
+{
+ if (p)
+ OSRExit();
+ return a5
+}
+
+function test(p)
+{
+ var array = [1,2,3,4,5];
+ return target(p, ...array);
+}
+noInline(test);
+
+for (var i = 0; i < 1e6; ++i)
+ test(false);
+test(true);
Added: trunk/JSTests/stress/new-array-with-spread-double-new-array-buffer.js (0 => 226033)
--- trunk/JSTests/stress/new-array-with-spread-double-new-array-buffer.js (rev 0)
+++ trunk/JSTests/stress/new-array-with-spread-double-new-array-buffer.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,18 @@
+function shouldBe(actual, expected)
+{
+ if (actual !== expected)
+ throw new Error('bad value: ' + actual);
+}
+
+function test()
+{
+ return [...[1.2, 2.3, 3.4]];
+}
+noInline(test);
+
+for (var i = 0; i < 1e6; ++i) {
+ var [a, b, c] = test();
+ shouldBe(a, 1.2);
+ shouldBe(b, 2.3);
+ shouldBe(c, 3.4);
+}
Added: trunk/JSTests/stress/new-array-with-spread-with-phantom-new-array-buffer.js (0 => 226033)
--- trunk/JSTests/stress/new-array-with-spread-with-phantom-new-array-buffer.js (rev 0)
+++ trunk/JSTests/stress/new-array-with-spread-with-phantom-new-array-buffer.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,28 @@
+function shouldBe(actual, expected)
+{
+ if (actual !== expected)
+ throw new Error('bad value: ' + actual);
+}
+
+function target(array)
+{
+ return [...array, 4, ...array];
+}
+
+function test()
+{
+ return target([1, 2, 3]);
+}
+noInline(test);
+
+for (var i = 0; i < 1e6; ++i) {
+ var result = test();
+ shouldBe(result.length, 7);
+ shouldBe(result[0], 1);
+ shouldBe(result[1], 2);
+ shouldBe(result[2], 3);
+ shouldBe(result[3], 4);
+ shouldBe(result[4], 1);
+ shouldBe(result[5], 2);
+ shouldBe(result[6], 3);
+}
Added: trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs.js (0 => 226033)
--- trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs.js (rev 0)
+++ trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,97 @@
+"use strict";
+
+function assert(b, m="") {
+ if (!b)
+ throw new Error("Bad assertion: " + m);
+}
+noInline(assert);
+
+function test1() {
+ function bar(a, b, c, d) {
+ return [a, b, c, d];
+ }
+ function foo() {
+ return bar(...[0, 1, 2, 3]);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 10000; i++) {
+ let [a, b, c, d] = foo();
+ assert(a === 0);
+ assert(b === 1);
+ assert(c === 2);
+ assert(d === 3) ;
+ }
+}
+
+function test2() {
+ function bar(...args) {
+ return args;
+ }
+ function foo() {
+ let args = [1, 2, 3];
+ return bar(...args, 0, ...args);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 10000; i++) {
+ let r = foo();
+ assert(r.length === 7);
+ let [a, b, c, d, e, f, g] = r;
+ assert(a === 1);
+ assert(b === 2);
+ assert(c === 3);
+ assert(d === 0);
+ assert(e === 1);
+ assert(f === 2);
+ assert(g === 3);
+ }
+}
+
+function test3() {
+ function baz(...args) {
+ return args;
+ }
+ function bar(...args) {
+ return baz(...args);
+ }
+ function foo() {
+ let args = [3];
+ return bar(...args, 0, ...args);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 100000; i++) {
+ let r = foo();
+ assert(r.length === 3);
+ let [a, b, c] = r;
+ assert(a === 3);
+ assert(b === 0);
+ assert(c === 3);
+ }
+}
+
+function test4() {
+ function baz(...args) {
+ return args;
+ }
+ function bar(...args) {
+ return baz(...args);
+ }
+ function foo() {
+ let args = [];
+ return bar(...args, 0, ...args);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 100000; i++) {
+ let r = foo();
+ assert(r.length === 1);
+ assert(r[0] === 0);
+ }
+}
+
+test1();
+test2();
+test3();
+test4();
Added: trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs2.js (0 => 226033)
--- trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs2.js (rev 0)
+++ trunk/JSTests/stress/phantom-new-array-buffer-forward-varargs2.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,33 @@
+"use strict";
+
+function assert(b, m="") {
+ if (!b)
+ throw new Error("Bad assertion: " + m);
+}
+noInline(assert);
+
+function test() {
+ function baz(...args) {
+ return args;
+ }
+ function bar(...args) {
+ return baz(...args);
+ }
+ function foo(a, b, c, ...args) {
+ return bar(...args, a, ...[0, 1, 2]);
+ }
+ noInline(foo);
+
+ for (let i = 0; i < 100000; i++) {
+ let r = foo(i, i+1, i+2, i+3);
+ assert(r.length === 5);
+ let [a, b, c, d, e] = r;
+ assert(a === i+3);
+ assert(b === i);
+ assert(c === 0);
+ assert(d === 1);
+ assert(e === 2);
+ }
+}
+
+test();
Added: trunk/JSTests/stress/phantom-new-array-buffer-osr-exit.js (0 => 226033)
--- trunk/JSTests/stress/phantom-new-array-buffer-osr-exit.js (rev 0)
+++ trunk/JSTests/stress/phantom-new-array-buffer-osr-exit.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,46 @@
+function assert(b) {
+ if (!b)
+ throw new Error("Bad assertion!");
+}
+noInline(assert);
+
+let value = false;
+
+function baz(x) {
+ if (typeof x !== "number") {
+ value = true;
+ }
+ return x;
+}
+noInline(baz);
+
+function bar(...args) {
+ return args;
+}
+
+let didEffects = false;
+function effects() { didEffects = true; }
+noInline(effects);
+
+function foo(a) {
+ let args = [1];
+ let theArgs = [...args, a, ...args];
+ baz(a);
+ if (value) {
+ effects();
+ }
+ let r = bar.apply(null, theArgs);
+ return r;
+}
+noInline(foo);
+
+for (let i = 0; i < 100000; i++) {
+ foo(i);
+ assert(!didEffects);
+}
+let o = {};
+let [a, b, c] = foo(o);
+assert(a === 1);
+assert(b === o);
+assert(c === 1);
+assert(didEffects);
Added: trunk/JSTests/stress/spread-escapes-but-new-array-buffer-does-not.js (0 => 226033)
--- trunk/JSTests/stress/spread-escapes-but-new-array-buffer-does-not.js (rev 0)
+++ trunk/JSTests/stress/spread-escapes-but-new-array-buffer-does-not.js 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,35 @@
+function assert(b) {
+ if (!b)
+ throw new Error;
+}
+noInline(assert);
+
+function getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+
+function theFunc(obj, index) {
+ let args = [42, 20];
+ let functions = getProperties(obj);
+ let func = functions[index % functions.length];
+ obj[func](...args);
+}
+
+let obj = {
+ valueOf: function (x, y) {
+ assert(x === 42);
+ assert(y === 20);
+ try {
+ } catch (e) {}
+ }
+};
+
+for (let i = 0; i < 1e5; ++i) {
+ for (let _i = 0; _i < 100; _i++) {
+ }
+ theFunc(obj, 897989);
+}
Modified: trunk/Source/_javascript_Core/ChangeLog (226032 => 226033)
--- trunk/Source/_javascript_Core/ChangeLog 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/ChangeLog 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1,5 +1,64 @@
2017-12-17 Yusuke Suzuki <utatane....@gmail.com>
+ [FTL] NewArrayBuffer should be sinked if it is only used for spreading
+ https://bugs.webkit.org/show_bug.cgi?id=179762
+
+ Reviewed by Saam Barati.
+
+ This patch extends arguments elimination phase to accept NewArrayBuffer.
+ We can convert NewArrayBuffer to PhantomNewArrayBuffer if it is only
+ used by spreading nodes.
+
+ This improves SixSpeed spread.es6 by ~3.35x.
+
+ spread.es6 79.1496+-3.5665 ^ 23.6204+-1.8526 ^ definitely 3.3509x faster
+
+ * dfg/DFGAbstractInterpreterInlines.h:
+ (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
+ * dfg/DFGArgumentsEliminationPhase.cpp:
+ * dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * dfg/DFGDoesGC.cpp:
+ (JSC::DFG::doesGC):
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ * dfg/DFGNode.h:
+ (JSC::DFG::Node::hasNewArrayBufferData):
+ (JSC::DFG::Node::hasVectorLengthHint):
+ (JSC::DFG::Node::hasIndexingType):
+ (JSC::DFG::Node::indexingType):
+ (JSC::DFG::Node::hasCellOperand):
+ (JSC::DFG::Node::isPhantomAllocation):
+ * dfg/DFGNodeType.h:
+ * dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
+ (JSC::DFG::LocalOSRAvailabilityCalculator::executeNode):
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ * dfg/DFGPromotedHeapLocation.cpp:
+ (WTF::printInternal):
+ * dfg/DFGPromotedHeapLocation.h:
+ * dfg/DFGSafeToExecute.h:
+ (JSC::DFG::safeToExecute):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGValidate.cpp:
+ * ftl/FTLCapabilities.cpp:
+ (JSC::FTL::canCompile):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compileNode):
+ (JSC::FTL::DFG::LowerDFGToB3::compileNewArrayWithSpread):
+ (JSC::FTL::DFG::LowerDFGToB3::compileSpread):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
+ (JSC::FTL::DFG::LowerDFGToB3::compileForwardVarargs):
+ (JSC::FTL::DFG::LowerDFGToB3::compileForwardVarargsWithSpread):
+ * ftl/FTLOperations.cpp:
+ (JSC::FTL::operationPopulateObjectInOSR):
+ (JSC::FTL::operationMaterializeObjectInOSR):
+
+2017-12-17 Yusuke Suzuki <utatane....@gmail.com>
+
[JSC] Use IsoSpace for JSWeakMap and JSWeakSet to use finalizeUnconditionally
https://bugs.webkit.org/show_bug.cgi?id=180916
Modified: trunk/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -2243,6 +2243,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case BottomValue:
m_state.setDidClobber(true); // Prevent constant folding.
// This claims to return bottom.
Modified: trunk/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -45,6 +45,7 @@
#include "JSCInlines.h"
#include <wtf/HashSet.h>
#include <wtf/ListDump.h>
+#include <wtf/RecursableLambda.h>
namespace JSC { namespace DFG {
@@ -117,8 +118,10 @@
// We check ArrayUse here because ArrayUse indicates that the iterator
// protocol for Arrays is non-observable by user code (e.g, it hasn't
// been changed).
- if (node->child1().useKind() == ArrayUse && node->child1()->op() == CreateRest && m_candidates.contains(node->child1().node()))
- m_candidates.add(node);
+ if (node->child1().useKind() == ArrayUse) {
+ if ((node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer) && m_candidates.contains(node->child1().node()))
+ m_candidates.add(node);
+ }
}
break;
@@ -130,7 +133,7 @@
for (unsigned i = 0; i < node->numChildren(); i++) {
if (bitVector->get(i)) {
Node* child = m_graph.varArgChild(node, i).node();
- isOK = child->op() == Spread && child->child1()->op() == CreateRest && m_candidates.contains(child);
+ isOK = child->op() == Spread && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer) && m_candidates.contains(child);
if (!isOK)
break;
}
@@ -143,6 +146,12 @@
}
break;
}
+
+ case NewArrayBuffer: {
+ if (m_graph.isWatchingHavingABadTimeWatchpoint(node) && !hasAnyArrayStorage(node->indexingType()))
+ m_candidates.add(node);
+ break;
+ }
case CreateScopedArguments:
// FIXME: We could handle this if it wasn't for the fact that scoped arguments are
@@ -324,7 +333,7 @@
if (bitVector->get(i)) {
dontEscape = child->op() == Spread
&& child->child1().useKind() == ArrayUse
- && child->child1()->op() == CreateRest
+ && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer)
&& isWatchingHavingABadTimeWatchpoint;
} else
dontEscape = false;
@@ -337,15 +346,17 @@
}
case Spread: {
- bool isOK = node->child1().useKind() == ArrayUse && node->child1()->op() == CreateRest;
+ bool isOK = node->child1().useKind() == ArrayUse && (node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer);
if (!isOK)
escape(node->child1(), node);
break;
}
+ case NewArrayBuffer:
+ break;
case LoadVarargs:
- if (node->loadVarargsData()->offset && (node->child1()->op() == NewArrayWithSpread || node->child1()->op() == Spread))
+ if (node->loadVarargsData()->offset && (node->child1()->op() == NewArrayWithSpread || node->child1()->op() == Spread || node->child1()->op() == NewArrayBuffer))
escape(node->child1(), node);
break;
@@ -355,7 +366,7 @@
case TailCallVarargsInlinedCaller:
escape(node->child1(), node);
escape(node->child2(), node);
- if (node->callVarargsData()->firstVarArgOffset && (node->child3()->op() == NewArrayWithSpread || node->child3()->op() == Spread))
+ if (node->callVarargsData()->firstVarArgOffset && (node->child3()->op() == NewArrayWithSpread || node->child3()->op() == Spread || node->child3()->op() == NewArrayBuffer))
escape(node->child3(), node);
break;
@@ -391,12 +402,13 @@
case CheckStructureOrEmpty:
case CheckStructure: {
- if (!m_candidates.contains(node->child1().node()))
+ Node* target = node->child1().node();
+ if (!m_candidates.contains(target))
break;
Structure* structure = nullptr;
- JSGlobalObject* globalObject = m_graph.globalObjectFor(node->child1().node()->origin.semantic);
- switch (node->child1().node()->op()) {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(target->origin.semantic);
+ switch (target->op()) {
case CreateDirectArguments:
structure = globalObject->directArgumentsStructure();
break;
@@ -404,13 +416,20 @@
structure = globalObject->clonedArgumentsStructure();
break;
case CreateRest:
- ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(node));
+ ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
structure = globalObject->restParameterStructure();
break;
case NewArrayWithSpread:
- ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(node));
+ ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
structure = globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous);
break;
+ case NewArrayBuffer: {
+ ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
+ IndexingType indexingType = target->indexingType();
+ ASSERT(!hasAnyArrayStorage(indexingType));
+ structure = globalObject->originalArrayStructureForIndexingType(indexingType);
+ break;
+ }
default:
RELEASE_ASSERT_NOT_REACHED();
}
@@ -611,7 +630,7 @@
// Therefore, we should have already transformed the allocation before the use
// of an allocation.
ASSERT(candidate->op() == PhantomCreateRest || candidate->op() == PhantomDirectArguments || candidate->op() == PhantomClonedArguments
- || candidate->op() == PhantomSpread || candidate->op() == PhantomNewArrayWithSpread);
+ || candidate->op() == PhantomSpread || candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer);
return true;
};
@@ -653,6 +672,14 @@
node->setOpAndDefaultFlags(PhantomNewArrayWithSpread);
break;
+
+ case NewArrayBuffer:
+ if (!m_candidates.contains(node))
+ break;
+
+ node->setOpAndDefaultFlags(PhantomNewArrayBuffer);
+ node->child1() = Edge(insertionSet.insertConstant(nodeIndex, node->origin, node->cellOperand()));
+ break;
case GetFromArguments: {
Node* candidate = node->child1().node();
@@ -795,53 +822,61 @@
OpInfo(data), Edge(value));
};
- if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomSpread) {
- bool canConvertToStaticLoadStores = true;
+ if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
+ auto canConvertToStaticLoadStores = recursableLambda([&] (auto self, Node* candidate) -> bool {
+ if (candidate->op() == PhantomSpread)
+ return self(candidate->child1().node());
- auto canConvertToStaticLoadStoresForSpread = [] (Node* spread) {
- ASSERT(spread->op() == PhantomSpread);
- ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ if (candidate->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = candidate->bitVector();
+ for (unsigned i = 0; i < candidate->numChildren(); i++) {
+ if (bitVector->get(i)) {
+ if (!self(m_graph.varArgChild(candidate, i).node()))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // PhantomNewArrayBuffer only contains constants. It can always convert LoadVarargs to static load stores.
+ if (candidate->op() == PhantomNewArrayBuffer)
+ return true;
+
+ ASSERT(candidate->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
return inlineCallFrame && !inlineCallFrame->isVarargs();
- };
+ });
- if (candidate->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = candidate->bitVector();
- for (unsigned i = 0; i < candidate->numChildren(); i++) {
- if (bitVector->get(i)) {
- if (!canConvertToStaticLoadStoresForSpread(m_graph.varArgChild(candidate, i).node())) {
- canConvertToStaticLoadStores = false;
- break;
+ if (canConvertToStaticLoadStores(candidate)) {
+ auto countNumberOfArguments = recursableLambda([&](auto self, Node* candidate) -> unsigned {
+ if (candidate->op() == PhantomSpread)
+ return self(candidate->child1().node());
+
+ if (candidate->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = candidate->bitVector();
+ unsigned result = 0;
+ for (unsigned i = 0; i < candidate->numChildren(); i++) {
+ if (bitVector->get(i))
+ result += self(m_graph.varArgChild(candidate, i).node());
+ else
+ ++result;
}
+ return result;
}
- }
- } else
- canConvertToStaticLoadStores = canConvertToStaticLoadStoresForSpread(candidate);
- if (canConvertToStaticLoadStores) {
- unsigned argumentCountIncludingThis = 1; // |this|
+ if (candidate->op() == PhantomNewArrayBuffer)
+ return candidate->castOperand<JSFixedArray*>()->length();
- auto countNumberOfSpreadArguments = [] (Node* spread) -> unsigned {
- ASSERT(spread->op() == PhantomSpread && spread->child1()->op() == PhantomCreateRest);
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ ASSERT(candidate->op() == PhantomCreateRest);
+ unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
+ InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
if (frameArgumentCount >= numberOfArgumentsToSkip)
return frameArgumentCount - numberOfArgumentsToSkip;
return 0;
- };
+ });
- if (candidate->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = candidate->bitVector();
- for (unsigned i = 0; i < candidate->numChildren(); i++) {
- if (bitVector->get(i))
- argumentCountIncludingThis += countNumberOfSpreadArguments(m_graph.varArgChild(candidate, i).node());
- else
- ++argumentCountIncludingThis;
- }
- } else
- argumentCountIncludingThis += countNumberOfSpreadArguments(candidate);
-
+ unsigned argumentCountIncludingThis = 1 + countNumberOfArguments(candidate); // |this|
if (argumentCountIncludingThis <= varargsData->limit) {
storeArgumentCountIncludingThis(argumentCountIncludingThis);
@@ -848,12 +883,42 @@
DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum);
// Define our limit to exclude "this", since that's a bit easier to reason about.
unsigned limit = varargsData->limit - 1;
- unsigned storeIndex = 0;
- auto forwardSpread = [&] (Node* spread, unsigned storeIndex) -> unsigned {
- ASSERT(spread->op() == PhantomSpread && spread->child1()->op() == PhantomCreateRest);
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ auto forwardNode = recursableLambda([&](auto self, Node* candidate, unsigned storeIndex) -> unsigned {
+ if (candidate->op() == PhantomSpread)
+ return self(candidate->child1().node(), storeIndex);
+
+ if (candidate->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = candidate->bitVector();
+ for (unsigned i = 0; i < candidate->numChildren(); i++) {
+ if (bitVector->get(i))
+ storeIndex = self(m_graph.varArgChild(candidate, i).node(), storeIndex);
+ else {
+ Node* value = m_graph.varArgChild(candidate, i).node();
+ storeValue(value, storeIndex++);
+ }
+ }
+ return storeIndex;
+ }
+
+ if (candidate->op() == PhantomNewArrayBuffer) {
+ bool canExit = true;
+ auto* array = candidate->castOperand<JSFixedArray*>();
+ for (unsigned index = 0; index < array->length(); ++index) {
+ JSValue constant;
+ if (candidate->indexingType() == ArrayWithDouble)
+ constant = jsDoubleNumber(array->get(index).asNumber());
+ else
+ constant = array->get(index);
+ Node* value = insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant);
+ storeValue(value, storeIndex++);
+ }
+ return storeIndex;
+ }
+
+ ASSERT(candidate->op() == PhantomCreateRest);
+ unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
+ InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
for (unsigned loadIndex = numberOfArgumentsToSkip; loadIndex < frameArgumentCount; ++loadIndex) {
VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
@@ -861,26 +926,12 @@
Node* value = insertionSet.insertNode(
nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit),
OpInfo(data));
- storeValue(value, storeIndex);
- ++storeIndex;
+ storeValue(value, storeIndex++);
}
return storeIndex;
- };
+ });
- if (candidate->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = candidate->bitVector();
- for (unsigned i = 0; i < candidate->numChildren(); i++) {
- if (bitVector->get(i))
- storeIndex = forwardSpread(m_graph.varArgChild(candidate, i).node(), storeIndex);
- else {
- Node* value = m_graph.varArgChild(candidate, i).node();
- storeValue(value, storeIndex);
- ++storeIndex;
- }
- }
- } else
- storeIndex = forwardSpread(candidate, storeIndex);
-
+ unsigned storeIndex = forwardNode(candidate, 0);
RELEASE_ASSERT(storeIndex <= limit);
Node* undefined = nullptr;
for (; storeIndex < limit; ++storeIndex) {
@@ -1021,37 +1072,69 @@
}
};
- if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomSpread) {
- bool canTransformToStaticArgumentCountCall = true;
+ if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
+ auto canTransformToStaticArgumentCountCall = recursableLambda([&](auto self, Node* candidate) -> bool {
+ if (candidate->op() == PhantomSpread)
+ return self(candidate->child1().node());
- auto canTransformToStaticArgumentCountCallForSpread = [] (Node* spread) {
- ASSERT(spread->op() == PhantomSpread);
- ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ if (candidate->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = candidate->bitVector();
+ for (unsigned i = 0; i < candidate->numChildren(); i++) {
+ if (bitVector->get(i)) {
+ Node* spread = m_graph.varArgChild(candidate, i).node();
+ if (!self(spread))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // PhantomNewArrayBuffer only contains constants. It can always convert LoadVarargs to static load stores.
+ if (candidate->op() == PhantomNewArrayBuffer)
+ return true;
+
+ ASSERT(candidate->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
return inlineCallFrame && !inlineCallFrame->isVarargs();
- };
+ });
- if (candidate->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = candidate->bitVector();
- for (unsigned i = 0; i < candidate->numChildren(); i++) {
- if (bitVector->get(i)) {
- Node* spread = m_graph.varArgChild(candidate, i).node();
- if (!canTransformToStaticArgumentCountCallForSpread(spread)) {
- canTransformToStaticArgumentCountCall = false;
- break;
+ if (canTransformToStaticArgumentCountCall(candidate)) {
+ Vector<Node*> arguments;
+ auto appendNode = recursableLambda([&](auto self, Node* candidate) -> void {
+ if (candidate->op() == PhantomSpread) {
+ self(candidate->child1().node());
+ return;
+ }
+
+ if (candidate->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = candidate->bitVector();
+ for (unsigned i = 0; i < candidate->numChildren(); i++) {
+ Node* child = m_graph.varArgChild(candidate, i).node();
+ if (bitVector->get(i))
+ self(child);
+ else
+ arguments.append(child);
}
+ return;
}
- }
- } else
- canTransformToStaticArgumentCountCall = canTransformToStaticArgumentCountCallForSpread(candidate);
- if (canTransformToStaticArgumentCountCall) {
- Vector<Node*> arguments;
- auto appendSpread = [&] (Node* spread) {
- ASSERT(spread->op() == PhantomSpread);
- ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
+ if (candidate->op() == PhantomNewArrayBuffer) {
+ bool canExit = true;
+ auto* array = candidate->castOperand<JSFixedArray*>();
+ for (unsigned index = 0; index < array->length(); ++index) {
+ JSValue constant;
+ if (candidate->indexingType() == ArrayWithDouble)
+ constant = jsDoubleNumber(array->get(index).asNumber());
+ else
+ constant = array->get(index);
+ arguments.append(insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant));
+ }
+ return;
+ }
+
+ ASSERT(candidate->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
for (unsigned i = 1 + numberOfArgumentsToSkip; i < inlineCallFrame->argumentCountIncludingThis; ++i) {
+ StackAccessData* data = m_graph.m_stackAccessData.add(
virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
@@ -1062,20 +1145,9 @@
arguments.append(value);
}
- };
+ });
- if (candidate->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = candidate->bitVector();
- for (unsigned i = 0; i < candidate->numChildren(); i++) {
- Node* child = m_graph.varArgChild(candidate, i).node();
- if (bitVector->get(i))
- appendSpread(child);
- else
- arguments.append(child);
- }
- } else
- appendSpread(candidate);
-
+ appendNode(candidate);
convertToStaticArgumentCountCall(arguments);
} else
convertToForwardsCall();
Modified: trunk/Source/_javascript_Core/dfg/DFGClobberize.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGClobberize.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGClobberize.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -523,6 +523,7 @@
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomCreateRest:
// Even though it's phantom, it still has the property that one can't be replaced with another.
read(HeapObjectCount);
Modified: trunk/Source/_javascript_Core/dfg/DFGDoesGC.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGDoesGC.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGDoesGC.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -267,6 +267,7 @@
case PhantomDirectArguments:
case PhantomCreateRest:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomSpread:
case PhantomClonedArguments:
case GetMyArgumentByVal:
Modified: trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1642,6 +1642,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case GetMyArgumentByVal:
case GetMyArgumentByValOutOfBounds:
Modified: trunk/Source/_javascript_Core/dfg/DFGNode.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGNode.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGNode.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1092,7 +1092,7 @@
bool hasNewArrayBufferData()
{
- return op() == NewArrayBuffer;
+ return op() == NewArrayBuffer || op() == PhantomNewArrayBuffer;
}
NewArrayBufferData newArrayBufferData()
@@ -1103,7 +1103,7 @@
unsigned hasVectorLengthHint()
{
- return op() == NewArrayBuffer;
+ return op() == NewArrayBuffer || op() == PhantomNewArrayBuffer;
}
unsigned vectorLengthHint()
@@ -1118,6 +1118,7 @@
case NewArray:
case NewArrayWithSize:
case NewArrayBuffer:
+ case PhantomNewArrayBuffer:
return true;
default:
return false;
@@ -1141,7 +1142,7 @@
IndexingType indexingType()
{
ASSERT(hasIndexingType());
- if (op() == NewArrayBuffer)
+ if (op() == NewArrayBuffer || op() == PhantomNewArrayBuffer)
return static_cast<IndexingType>(newArrayBufferData().indexingType);
return static_cast<IndexingType>(m_opInfo.as<uint32_t>());
}
@@ -1641,6 +1642,7 @@
case MaterializeCreateActivation:
case NewRegexp:
case NewArrayBuffer:
+ case PhantomNewArrayBuffer:
case CompareEqPtr:
case CallObjectConstructor:
case DirectCall:
@@ -1904,6 +1906,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case PhantomNewFunction:
case PhantomNewGeneratorFunction:
Modified: trunk/Source/_javascript_Core/dfg/DFGNodeType.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGNodeType.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGNodeType.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -371,6 +371,7 @@
macro(PhantomCreateRest, NodeResultJS | NodeMustGenerate) \
macro(PhantomSpread, NodeResultJS | NodeMustGenerate) \
macro(PhantomNewArrayWithSpread, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
+ macro(PhantomNewArrayBuffer, NodeResultJS | NodeMustGenerate) \
macro(CreateScopedArguments, NodeResultJS) \
macro(CreateClonedArguments, NodeResultJS) \
macro(PhantomClonedArguments, NodeResultJS | NodeMustGenerate) \
Modified: trunk/Source/_javascript_Core/dfg/DFGOSRAvailabilityAnalysisPhase.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGOSRAvailabilityAnalysisPhase.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGOSRAvailabilityAnalysisPhase.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -277,6 +277,10 @@
m_availability.m_heap.set(PromotedHeapLocation(NewArrayWithSpreadArgumentPLoc, node, i), Availability(child));
}
break;
+
+ case PhantomNewArrayBuffer:
+ m_availability.m_heap.set(PromotedHeapLocation(NewArrayBufferPLoc, node), Availability(node->child1().node()));
+ break;
default:
break;
Modified: trunk/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1086,6 +1086,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case GetMyArgumentByVal:
case GetMyArgumentByValOutOfBounds:
Modified: trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -122,6 +122,10 @@
case NewArrayWithSpreadArgumentPLoc:
out.print("NewArrayWithSpreadArgumentPLoc");
return;
+
+ case NewArrayBufferPLoc:
+ out.print("NewArrayBufferPLoc");
+ return;
}
RELEASE_ASSERT_NOT_REACHED();
Modified: trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGPromotedHeapLocation.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -63,6 +63,7 @@
VectorLengthPLoc,
SpreadPLoc,
NewArrayWithSpreadArgumentPLoc,
+ NewArrayBufferPLoc,
};
class PromotedLocationDescriptor {
Modified: trunk/Source/_javascript_Core/dfg/DFGSafeToExecute.h (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGSafeToExecute.h 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGSafeToExecute.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -399,6 +399,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case GetMyArgumentByVal:
case GetMyArgumentByValOutOfBounds:
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -5659,6 +5659,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case AtomicsIsLockFree:
case AtomicsAdd:
case AtomicsAnd:
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -6137,6 +6137,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case IdentityWithProfile:
case CPUIntrinsic:
DFG_CRASH(m_jit.graph(), node, "Unexpected node");
Modified: trunk/Source/_javascript_Core/dfg/DFGValidate.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/dfg/DFGValidate.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/dfg/DFGValidate.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -759,8 +759,8 @@
case PhantomSpread:
VALIDATE((node), m_graph.m_form == SSA);
- // We currently only support PhantomSpread over PhantomCreateRest.
- VALIDATE((node), node->child1()->op() == PhantomCreateRest);
+ // We currently support PhantomSpread over PhantomCreateRest and PhantomNewArrayBuffer.
+ VALIDATE((node), node->child1()->op() == PhantomCreateRest || node->child1()->op() == PhantomNewArrayBuffer);
break;
case PhantomNewArrayWithSpread: {
@@ -769,7 +769,7 @@
for (unsigned i = 0; i < node->numChildren(); i++) {
Node* child = m_graph.varArgChild(node, i).node();
if (bitVector->get(i)) {
- // We currently only support PhantomSpread over PhantomCreateRest.
+ // We currently support PhantomSpread over PhantomCreateRest and PhantomNewArrayBuffer.
VALIDATE((node), child->op() == PhantomSpread);
} else
VALIDATE((node), !child->isPhantomAllocation());
@@ -777,6 +777,10 @@
break;
}
+ case PhantomNewArrayBuffer:
+ VALIDATE((node), m_graph.m_form == SSA);
+ break;
+
case NewArrayWithSpread: {
BitVector* bitVector = node->bitVector();
for (unsigned i = 0; i < node->numChildren(); i++) {
@@ -791,7 +795,7 @@
}
case Spread:
- VALIDATE((node), !node->child1()->isPhantomAllocation() || node->child1()->op() == PhantomCreateRest);
+ VALIDATE((node), !node->child1()->isPhantomAllocation() || node->child1()->op() == PhantomCreateRest || node->child1()->op() == PhantomNewArrayBuffer);
break;
case EntrySwitch:
Modified: trunk/Source/_javascript_Core/ftl/FTLCapabilities.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/ftl/FTLCapabilities.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/ftl/FTLCapabilities.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -253,6 +253,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case GetMyArgumentByVal:
case GetMyArgumentByValOutOfBounds:
Modified: trunk/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -92,6 +92,7 @@
#include <unordered_set>
#include <wtf/Box.h>
#include <wtf/Gigacage.h>
+#include <wtf/RecursableLambda.h>
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
@@ -1256,6 +1257,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
case PhantomClonedArguments:
case PutHint:
case BottomValue:
@@ -5138,7 +5140,7 @@
void compileNewArrayWithSpread()
{
if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
- unsigned startLength = 0;
+ CheckedInt32 startLength = 0;
BitVector* bitVector = m_node->bitVector();
HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
@@ -5145,28 +5147,42 @@
for (unsigned i = 0; i < m_node->numChildren(); ++i) {
if (!bitVector->get(i))
++startLength;
+ else {
+ Edge& child = m_graph.varArgChild(m_node, i);
+ if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
+ startLength += child->child1()->castOperand<JSFixedArray*>()->length();
+ }
}
- LValue length = m_out.constInt32(startLength);
+ if (startLength.hasOverflowed()) {
+ terminate(Overflow);
+ return;
+ }
+ LValue length = m_out.constInt32(startLength.unsafeGet());
+
for (unsigned i = 0; i < m_node->numChildren(); ++i) {
if (bitVector->get(i)) {
Edge use = m_graph.varArgChild(m_node, i);
CheckValue* lengthCheck = nullptr;
if (use->op() == PhantomSpread) {
- RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
- unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
- LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
- return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
- }).iterator->value;
- lengthCheck = m_out.speculateAdd(length, spreadLength);
+ if (use->child1()->op() == PhantomCreateRest) {
+ InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
+ LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
+ }).iterator->value;
+ lengthCheck = m_out.speculateAdd(length, spreadLength);
+ }
} else {
LValue fixedArray = lowCell(use);
lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
}
- blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
- length = lengthCheck;
+
+ if (lengthCheck) {
+ blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
+ length = lengthCheck;
+ }
}
}
@@ -5180,45 +5196,57 @@
Edge use = m_graph.varArgChild(m_node, i);
if (bitVector->get(i)) {
if (use->op() == PhantomSpread) {
- RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
- unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
+ if (use->child1()->op() == PhantomNewArrayBuffer) {
+ IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
+ auto* array = use->child1()->castOperand<JSFixedArray*>();
+ for (unsigned i = 0; i < array->length(); ++i) {
+ // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
+ // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
+ int64_t value = JSValue::encode(array->get(i));
+ m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), (Checked<int32_t>(sizeof(JSValue)) * i).unsafeGet()));
+ }
+ index = m_out.add(index, m_out.constIntPtr(array->length()));
+ } else {
+ RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
- LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
- LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
+ LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
+ LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
- LBasicBlock loopStart = m_out.newBlock();
- LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock loopStart = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
- ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
- ValueFromBlock arrayIndexStart = m_out.anchor(index);
- ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);
+ ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
+ ValueFromBlock arrayIndexStart = m_out.anchor(index);
+ ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);
- m_out.branch(
- m_out.isZero64(length),
- unsure(continuation), unsure(loopStart));
+ m_out.branch(
+ m_out.isZero64(length),
+ unsure(continuation), unsure(loopStart));
- LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
+ LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
- LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
- LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);
+ LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
+ LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);
- LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
- m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));
+ LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
+ m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));
- LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
- LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
- ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);
+ LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
+ LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
+ ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);
- m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
- m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));
+ m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
+ m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));
- m_out.branch(
- m_out.below(nextLoadIndex, length),
- unsure(loopStart), unsure(continuation));
+ m_out.branch(
+ m_out.below(nextLoadIndex, length),
+ unsure(loopStart), unsure(continuation));
- m_out.appendTo(continuation, lastNext);
- index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
+ m_out.appendTo(continuation, lastNext);
+ index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
+ }
} else {
LBasicBlock loopStart = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
@@ -5294,6 +5322,11 @@
void compileSpread()
{
+ if (m_node->child1()->op() == PhantomNewArrayBuffer) {
+ setJSValue(frozenPointer(m_node->child1()->cellOperand()));
+ return;
+ }
+
if (m_node->child1()->op() == PhantomCreateRest) {
// This IR is rare to generate since it requires escaping the Spread
// but not the CreateRest. In bytecode, we have only few operations that
@@ -7114,43 +7147,49 @@
LValue jsCallee = lowJSValue(m_node->child1());
LValue thisArg = lowJSValue(m_node->child2());
- RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread);
+ RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);
- unsigned numNonSpreadParameters = 0;
+ unsigned staticArgumentCount = 0;
Vector<LValue, 2> spreadLengths;
Vector<LValue, 8> patchpointArguments;
HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
+ auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
+ if (target->op() == PhantomSpread) {
+ self(target->child1().node());
+ return;
+ }
- auto loadSpreadLength = [this, &cachedSpreadLengths] (Node* spread) -> LValue {
- RELEASE_ASSERT(spread->op() == PhantomSpread);
- RELEASE_ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
- return cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
- return m_out.zeroExtPtr(getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
- }).iterator->value;
- };
-
- if (arguments->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = arguments->bitVector();
- for (unsigned i = 0; i < arguments->numChildren(); i++) {
- if (bitVector->get(i)) {
- LValue length = loadSpreadLength(m_graph.varArgChild(arguments, i).node());
- patchpointArguments.append(length);
- spreadLengths.append(length);
- } else {
- ++numNonSpreadParameters;
- LValue argument = lowJSValue(m_graph.varArgChild(arguments, i));
- patchpointArguments.append(argument);
+ if (target->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = target->bitVector();
+ for (unsigned i = target->numChildren(); i--; ) {
+ if (bitVector->get(i))
+ self(m_graph.varArgChild(target, i).node());
+ else {
+ ++staticArgumentCount;
+ LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
+ patchpointArguments.append(argument);
+ }
}
+ return;
}
- } else {
- LValue length = loadSpreadLength(arguments);
+
+ if (target->op() == PhantomNewArrayBuffer) {
+ staticArgumentCount += target->castOperand<JSFixedArray*>()->length();
+ return;
+ }
+
+ RELEASE_ASSERT(target->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
+ LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
+ }).iterator->value;
patchpointArguments.append(length);
spreadLengths.append(length);
- }
+ });
- LValue argumentCountIncludingThis = m_out.constIntPtr(numNonSpreadParameters + 1);
+ pushAndCountArgumentsFromRightToLeft(arguments);
+ LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
for (LValue length : spreadLengths)
argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);
@@ -7263,14 +7302,51 @@
int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));
unsigned paramsOffset = 4;
- auto emitSpread = [&] (Node* spread, unsigned index) {
- RELEASE_ASSERT(spread->op() == PhantomSpread);
- RELEASE_ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ unsigned index = 0;
+ auto emitArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
+ if (target->op() == PhantomSpread) {
+ self(target->child1().node());
+ return;
+ }
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
+ if (target->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = target->bitVector();
+ for (unsigned i = target->numChildren(); i--; ) {
+ if (bitVector->get(i))
+ self(state->graph.varArgChild(target, i).node());
+ else {
+ jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
+ getValueFromRep(params[paramsOffset + (index++)], scratchGPR3);
+ jit.store64(scratchGPR3,
+ CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
+ }
+ }
+ return;
+ }
- B3::ValueRep numArgumentsToCopy = params[paramsOffset + index];
+ if (target->op() == PhantomNewArrayBuffer) {
+ auto* array = target->castOperand<JSFixedArray*>();
+ Checked<int32_t> offsetCount { 1 };
+ for (unsigned i = array->length(); i--; ++offsetCount) {
+ // Because varargs values are drained as JSValue, we should not generate value
+ // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
+ int64_t value = JSValue::encode(array->get(i));
+ jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
+ Checked<int32_t> currentStoreOffset { storeOffset };
+ currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
+ jit.store64(scratchGPR3,
+ CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset.unsafeGet()));
+ }
+ jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(array->length())), scratchGPR2);
+ return;
+ }
+
+ RELEASE_ASSERT(target->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+
+ unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
+
+ B3::ValueRep numArgumentsToCopy = params[paramsOffset + (index++)];
getValueFromRep(numArgumentsToCopy, scratchGPR3);
int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));
@@ -7283,22 +7359,8 @@
CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
done.link(&jit);
- };
-
- if (arguments->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = arguments->bitVector();
- for (unsigned i = arguments->numChildren(); i--; ) {
- if (bitVector->get(i))
- emitSpread(state->graph.varArgChild(arguments, i).node(), i);
- else {
- jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
- getValueFromRep(params[paramsOffset + i], scratchGPR3);
- jit.store64(scratchGPR3,
- CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
- }
- }
- } else
- emitSpread(arguments, 0);
+ });
+ emitArgumentsFromRightToLeft(arguments);
}
{
@@ -7415,7 +7477,7 @@
if (forwarding && m_node->child3()) {
Node* arguments = m_node->child3().node();
- if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread) {
+ if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
compileCallOrConstructVarargsSpread();
return;
}
@@ -7799,7 +7861,7 @@
{
if (m_node->child1()) {
Node* arguments = m_node->child1().node();
- if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread) {
+ if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
compileForwardVarargsWithSpread();
return;
}
@@ -7929,32 +7991,42 @@
HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
Node* arguments = m_node->child1().node();
- RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread);
+ RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);
unsigned numberOfStaticArguments = 0;
Vector<LValue, 2> spreadLengths;
- auto loadSpreadLength = [this, &cachedSpreadLengths] (Node* spread) -> LValue {
- ASSERT(spread->op() == PhantomSpread);
- ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
- unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
- return cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
- return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
- }).iterator->value;
- };
+ auto collectArgumentCount = recursableLambda([&](auto self, Node* target) -> void {
+ if (target->op() == PhantomSpread) {
+ self(target->child1().node());
+ return;
+ }
- if (arguments->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = arguments->bitVector();
- for (unsigned i = 0; i < arguments->numChildren(); i++) {
- if (bitVector->get(i))
- spreadLengths.append(loadSpreadLength(m_graph.varArgChild(arguments, i).node()));
- else
- ++numberOfStaticArguments;
+ if (target->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = target->bitVector();
+ for (unsigned i = 0; i < target->numChildren(); i++) {
+ if (bitVector->get(i))
+ self(m_graph.varArgChild(target, i).node());
+ else
+ ++numberOfStaticArguments;
+ }
+ return;
}
- } else
- spreadLengths.append(loadSpreadLength(arguments));
+ if (target->op() == PhantomNewArrayBuffer) {
+ numberOfStaticArguments += target->castOperand<JSFixedArray*>()->length();
+ return;
+ }
+
+ ASSERT(target->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
+ spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
+ }).iterator->value);
+ });
+
+ collectArgumentCount(arguments);
LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
for (LValue length : spreadLengths)
lengthIncludingThis = m_out.add(lengthIncludingThis, length);
@@ -7967,14 +8039,40 @@
m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
LValue targetStart = addressFor(data->machineStart).value();
- LValue storeIndex = m_out.constIntPtr(0);
- auto forwardSpread = [this, &cachedSpreadLengths, &targetStart] (Node* spread, LValue storeIndex) -> LValue {
- RELEASE_ASSERT(spread->op() == PhantomSpread);
- RELEASE_ASSERT(spread->child1()->op() == PhantomCreateRest);
- InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ auto forwardSpread = recursableLambda([this, &cachedSpreadLengths, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
+ if (target->op() == PhantomSpread)
+ return self(target->child1().node(), storeIndex);
- LValue sourceStart = getArgumentsStart(inlineCallFrame, spread->child1()->numberOfArgumentsToSkip());
+ if (target->op() == PhantomNewArrayWithSpread) {
+ BitVector* bitVector = target->bitVector();
+ for (unsigned i = 0; i < target->numChildren(); i++) {
+ if (bitVector->get(i))
+ storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
+ else {
+ LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
+ m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
+ storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
+ }
+ }
+ return storeIndex;
+ }
+
+ if (target->op() == PhantomNewArrayBuffer) {
+ auto* array = target->castOperand<JSFixedArray*>();
+ for (unsigned i = 0; i < array->length(); i++) {
+ // Because forwarded values are drained as JSValue, we should not generate value
+ // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
+ int64_t value = JSValue::encode(array->get(i));
+ m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), (Checked<int32_t>(sizeof(Register)) * i).unsafeGet()));
+ }
+ return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
+ }
+
+ RELEASE_ASSERT(target->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+
+ LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
LBasicBlock loop = m_out.newBlock();
@@ -8000,21 +8098,9 @@
m_out.appendTo(continuation, lastNext);
return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
- };
+ });
- if (arguments->op() == PhantomNewArrayWithSpread) {
- BitVector* bitVector = arguments->bitVector();
- for (unsigned i = 0; i < arguments->numChildren(); i++) {
- if (bitVector->get(i))
- storeIndex = forwardSpread(m_graph.varArgChild(arguments, i).node(), storeIndex);
- else {
- LValue value = lowJSValue(m_graph.varArgChild(arguments, i));
- m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
- storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
- }
- }
- } else
- storeIndex = forwardSpread(arguments, storeIndex);
+ LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));
LBasicBlock undefinedLoop = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
Modified: trunk/Source/_javascript_Core/ftl/FTLOperations.cpp (226032 => 226033)
--- trunk/Source/_javascript_Core/ftl/FTLOperations.cpp 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/_javascript_Core/ftl/FTLOperations.cpp 2017-12-18 11:49:33 UTC (rev 226033)
@@ -33,6 +33,7 @@
#include "FTLJITCode.h"
#include "FTLLazySlowPath.h"
#include "InlineCallFrame.h"
+#include "Interpreter.h"
#include "JSAsyncFunction.h"
#include "JSAsyncGeneratorFunction.h"
#include "JSCInlines.h"
@@ -90,6 +91,7 @@
case PhantomCreateRest:
case PhantomSpread:
case PhantomNewArrayWithSpread:
+ case PhantomNewArrayBuffer:
// Those are completely handled by operationMaterializeObjectInOSR
break;
@@ -441,6 +443,25 @@
return fixedArray;
}
+ case PhantomNewArrayBuffer: {
+ JSFixedArray* array = nullptr;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() == NewArrayBufferPLoc) {
+ array = jsCast<JSFixedArray*>(JSValue::decode(values[i]));
+ break;
+ }
+ }
+ RELEASE_ASSERT(array);
+
+ // For now, we use array allocation profile in the actual CodeBlock. It is OK since current NewArrayBuffer
+ // and PhantomNewArrayBuffer are always bound to a specific op_new_array_buffer.
+ CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(materialization->origin(), exec->codeBlock());
+ Instruction* currentInstruction = &codeBlock->instructions()[materialization->origin().bytecodeIndex];
+ RELEASE_ASSERT(Interpreter::getOpcodeID(currentInstruction[0].u.opcode) == op_new_array_buffer);
+ return constructArray(exec, currentInstruction[3].u.arrayAllocationProfile, array->values(), array->length());
+ }
+
case PhantomNewArrayWithSpread: {
CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(
materialization->origin(), exec->codeBlock());
Modified: trunk/Source/WTF/ChangeLog (226032 => 226033)
--- trunk/Source/WTF/ChangeLog 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/WTF/ChangeLog 2017-12-18 11:49:33 UTC (rev 226033)
@@ -1,3 +1,34 @@
+2017-12-17 Yusuke Suzuki <utatane....@gmail.com>
+
+ [FTL] NewArrayBuffer should be sinked if it is only used for spreading
+ https://bugs.webkit.org/show_bug.cgi?id=179762
+
+ Reviewed by Saam Barati.
+
+ We add RecursableLambda<>. This can take a lambda and offer a way
+ to call this lambda recursively without introducing additional allocations.
+
+ C++ lambdas are especially useful when we need to capture many
+ variables by reference, since passing many arguments to an ordinary
+ function is unwieldy. But a C++ lambda cannot easily call itself recursively.
+
+ Our recursableLambda passes a `self` function as the first argument of the
+ given lambda. We can call this `self` recursively.
+
+ auto targetFunction = recursableLambda([] (auto self, ...) -> resultType {
+ self(...);
+ });
+
+ While `std::function<> func = [&func] { ... }` allows recursion, it can incur
+ a heap allocation for the std::function<>. `auto func = [&func] { ... }` is
+ a compile error, since the lambda's type cannot be deduced while capturing `func`.
+
+ * WTF.xcodeproj/project.pbxproj:
+ * wtf/RecursableLambda.h: Added.
+ (WTF::RecursableLambda::RecursableLambda):
+ (WTF::RecursableLambda::operator() const):
+ (WTF::recursableLambda):
+
2017-12-17 Mark Lam <mark....@apple.com>
Enhance Ref and RefPtr to be able to work with smart pointers.
Modified: trunk/Source/WTF/WTF.xcodeproj/project.pbxproj (226032 => 226033)
--- trunk/Source/WTF/WTF.xcodeproj/project.pbxproj 2017-12-18 11:48:18 UTC (rev 226032)
+++ trunk/Source/WTF/WTF.xcodeproj/project.pbxproj 2017-12-18 11:49:33 UTC (rev 226033)
@@ -604,6 +604,7 @@
E311FB151F0A568B003C08DE /* ThreadGroup.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ThreadGroup.cpp; sourceTree = "<group>"; };
E311FB161F0A568B003C08DE /* ThreadGroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadGroup.h; sourceTree = "<group>"; };
E3200AB41E9A536D003B59D2 /* PlatformRegisters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlatformRegisters.h; sourceTree = "<group>"; };
+ E33D5F871FBED66700BF625E /* RecursableLambda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RecursableLambda.h; sourceTree = "<group>"; };
E38C41241EB4E04C0042957D /* CPUTimeCocoa.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = CPUTimeCocoa.mm; sourceTree = "<group>"; };
E38C41261EB4E0680042957D /* CPUTime.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTime.cpp; sourceTree = "<group>"; };
E38C41271EB4E0680042957D /* CPUTime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUTime.h; sourceTree = "<group>"; };
@@ -821,8 +822,8 @@
0F30CB591FCDF133004B5323 /* ConcurrentPtrHashSet.h */,
0FB467831FDE282C003FCB09 /* ConcurrentVector.h */,
0FDB698D1B7C643A000C1078 /* Condition.h */,
+ 0F8E85DA1FD485B000691889 /* CountingLock.cpp */,
0FFBCBFA1FD37E0F0072AAF0 /* CountingLock.h */,
- 0F8E85DA1FD485B000691889 /* CountingLock.cpp */,
E38C41261EB4E0680042957D /* CPUTime.cpp */,
E38C41271EB4E0680042957D /* CPUTime.h */,
515F794B1CFC9F4A00CCED93 /* CrossThreadCopier.cpp */,
@@ -1000,6 +1001,7 @@
0F87105916643F190090B0AD /* RawPointer.h */,
0FEC3C5C1F368A9700F59B6C /* ReadWriteLock.cpp */,
0FEC3C5D1F368A9700F59B6C /* ReadWriteLock.h */,
+ E33D5F871FBED66700BF625E /* RecursableLambda.h */,
0FDE87F61DFD07CC0064C390 /* RecursiveLockAdapter.h */,
A8A472FE151A825B004123FF /* RedBlackTree.h */,
26299B6D17A9E5B800ADEBE5 /* Ref.h */,
@@ -1418,6 +1420,8 @@
A8A47463151A825B004123FF /* CollatorICU.cpp in Sources */,
FE05FAFF1FE5007500093230 /* RefPtr.cpp in Sources */,
0F8F2B92172E0103007DBDA5 /* CompilationThread.cpp in Sources */,
+ 0F30CB5A1FCDF134004B5323 /* ConcurrentPtrHashSet.cpp in Sources */,
+ 0F8E85DB1FD485B000691889 /* CountingLock.cpp in Sources */,
E38C41281EB4E0680042957D /* CPUTime.cpp in Sources */,
E38C41251EB4E04C0042957D /* CPUTimeCocoa.mm in Sources */,
515F794E1CFC9F4A00CCED93 /* CrossThreadCopier.cpp in Sources */,
@@ -1467,10 +1471,9 @@
0FFF19DC1BB334EB00886D91 /* ParallelHelperPool.cpp in Sources */,
0F824A681B7443A0002E345D /* ParkingLot.cpp in Sources */,
51F1752B1F3D486000C74950 /* PersistentCoders.cpp in Sources */,
- 0F8E85DB1FD485B000691889 /* CountingLock.cpp in Sources */,
51F1752C1F3D486000C74950 /* PersistentDecoder.cpp in Sources */,
- 0F30CB5A1FCDF134004B5323 /* ConcurrentPtrHashSet.cpp in Sources */,
51F1752D1F3D486000C74950 /* PersistentEncoder.cpp in Sources */,
+ FE85416E1FBE285D008DA5DA /* Poisoned.cpp in Sources */,
0F9D3362165DBA73005AD387 /* PrintStream.cpp in Sources */,
143F611F1565F0F900DB514A /* RAMSize.cpp in Sources */,
A3B725EC987446AD93F1A440 /* RandomDevice.cpp in Sources */,
@@ -1482,7 +1485,6 @@
1469419316EAAF6D0024E146 /* RunLoopTimerCF.cpp in Sources */,
1469419916EAB0410024E146 /* SchedulePairCF.cpp in Sources */,
1469419716EAAFF80024E146 /* SchedulePairMac.mm in Sources */,
- FE85416E1FBE285D008DA5DA /* Poisoned.cpp in Sources */,
0F66B28E1DC97BAB004A1D3F /* Seconds.cpp in Sources */,
A8A47421151A825B004123FF /* SHA1.cpp in Sources */,
5311BD531EA71CAD00525281 /* Signals.cpp in Sources */,
Added: trunk/Source/WTF/wtf/RecursableLambda.h (0 => 226033)
--- trunk/Source/WTF/wtf/RecursableLambda.h (rev 0)
+++ trunk/Source/WTF/wtf/RecursableLambda.h 2017-12-18 11:49:33 UTC (rev 226033)
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Yusuke Suzuki <utatane....@gmail.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+namespace WTF {
+
+template<typename Functor>
+class RecursableLambda {
+public:
+ explicit RecursableLambda(Functor&& functor)
+ : m_functor(std::forward<Functor>(functor))
+ {
+ }
+
+ template<typename... Args>
+ decltype(auto) operator()(Args&&... args) const
+ {
+ return m_functor(std::ref(*this), std::forward<Args>(args)...);
+ }
+
+private:
+ Functor m_functor;
+};
+
+template<typename Functor>
+decltype(auto) recursableLambda(Functor&& f)
+{
+ return RecursableLambda<std::decay_t<Functor>>(std::forward<Functor>(f));
+}
+
+} // namespace WTF
+
+using WTF::recursableLambda;