Revision: 24158
Author: [email protected]
Date: Wed Sep 24 00:05:07 2014 UTC
Log: Version 3.29.87 (based on bleeding_edge revision r24156)
Preserve message when rethrowing exception (issue 3583).
Fix escaped index JSON parsing (Chromium issue 416449).
Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=24158
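
The escaped-index JSON fix (Chromium issue 416449) is exercised by the new mjsunit
regression test added below. A minimal JavaScript sketch of the intended behavior
(assertEquals is the mjsunit helper; the extra o["0"] read is added here only for
illustration):

  // "\u0030" is just an escaped "0", so after parsing the key should behave
  // as the array index 0.
  var o = JSON.parse('{"\\u0030":100}');
  assertEquals(100, o[0]);     // indexed (element) lookup
  assertEquals(100, o["0"]);   // the equivalent named lookup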
Added:
/trunk/src/compiler/js-builtin-reducer-unittest.cc
/trunk/test/mjsunit/regress/regress-json-parse-index.js
Modified:
/trunk/ChangeLog
/trunk/Makefile
/trunk/PRESUBMIT.py
/trunk/src/api.cc
/trunk/src/arm/code-stubs-arm.cc
/trunk/src/arm/lithium-codegen-arm.cc
/trunk/src/arm/lithium-codegen-arm.h
/trunk/src/arm/macro-assembler-arm.cc
/trunk/src/arm/macro-assembler-arm.h
/trunk/src/arm64/code-stubs-arm64.cc
/trunk/src/arm64/lithium-codegen-arm64.cc
/trunk/src/arm64/lithium-codegen-arm64.h
/trunk/src/arm64/macro-assembler-arm64.cc
/trunk/src/arm64/macro-assembler-arm64.h
/trunk/src/arm64/simulator-arm64.cc
/trunk/src/base/macros.h
/trunk/src/builtins.cc
/trunk/src/builtins.h
/trunk/src/code-stubs-hydrogen.cc
/trunk/src/code-stubs.cc
/trunk/src/code-stubs.h
/trunk/src/compiler/arm/code-generator-arm.cc
/trunk/src/compiler/arm/instruction-selector-arm.cc
/trunk/src/compiler/arm64/code-generator-arm64.cc
/trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc
/trunk/src/compiler/arm64/instruction-selector-arm64.cc
/trunk/src/compiler/code-generator.cc
/trunk/src/compiler/code-generator.h
/trunk/src/compiler/common-operator-unittest.cc
/trunk/src/compiler/common-operator.cc
/trunk/src/compiler/common-operator.h
/trunk/src/compiler/compiler.gyp
/trunk/src/compiler/graph-unittest.cc
/trunk/src/compiler/graph-unittest.h
/trunk/src/compiler/ia32/code-generator-ia32.cc
/trunk/src/compiler/ia32/instruction-selector-ia32.cc
/trunk/src/compiler/instruction-codes.h
/trunk/src/compiler/instruction-selector-unittest.h
/trunk/src/compiler/js-builtin-reducer.cc
/trunk/src/compiler/js-builtin-reducer.h
/trunk/src/compiler/js-graph.h
/trunk/src/compiler/js-typed-lowering.cc
/trunk/src/compiler/machine-operator-reducer-unittest.cc
/trunk/src/compiler/machine-operator-reducer.cc
/trunk/src/compiler/machine-operator-reducer.h
/trunk/src/compiler/machine-operator-unittest.cc
/trunk/src/compiler/machine-operator.cc
/trunk/src/compiler/machine-operator.h
/trunk/src/compiler/node-matchers.h
/trunk/src/compiler/opcodes.h
/trunk/src/compiler/pipeline.cc
/trunk/src/compiler/raw-machine-assembler.h
/trunk/src/compiler/typer.cc
/trunk/src/compiler/x64/code-generator-x64.cc
/trunk/src/compiler/x64/instruction-selector-x64.cc
/trunk/src/compiler.cc
/trunk/src/conversions-inl.h
/trunk/src/conversions.h
/trunk/src/counters.h
/trunk/src/deoptimizer.h
/trunk/src/elements-kind.h
/trunk/src/heap/gc-idle-time-handler.h
/trunk/src/heap-snapshot-generator-inl.h
/trunk/src/heap-snapshot-generator.cc
/trunk/src/heap-snapshot-generator.h
/trunk/src/hydrogen-instructions.h
/trunk/src/hydrogen.cc
/trunk/src/hydrogen.h
/trunk/src/ia32/code-stubs-ia32.cc
/trunk/src/ia32/lithium-codegen-ia32.cc
/trunk/src/ia32/lithium-codegen-ia32.h
/trunk/src/ia32/macro-assembler-ia32.cc
/trunk/src/ia32/macro-assembler-ia32.h
/trunk/src/ic/arm/ic-arm.cc
/trunk/src/ic/arm/ic-compiler-arm.cc
/trunk/src/ic/arm64/ic-arm64.cc
/trunk/src/ic/arm64/ic-compiler-arm64.cc
/trunk/src/ic/handler-compiler.cc
/trunk/src/ic/ia32/ic-compiler-ia32.cc
/trunk/src/ic/ia32/ic-ia32.cc
/trunk/src/ic/ic-compiler.cc
/trunk/src/ic/ic.cc
/trunk/src/ic/ic.h
/trunk/src/ic/mips/ic-compiler-mips.cc
/trunk/src/ic/mips/ic-mips.cc
/trunk/src/ic/mips64/ic-compiler-mips64.cc
/trunk/src/ic/mips64/ic-mips64.cc
/trunk/src/ic/x64/ic-compiler-x64.cc
/trunk/src/ic/x64/ic-x64.cc
/trunk/src/ic/x87/handler-compiler-x87.cc
/trunk/src/ic/x87/ic-compiler-x87.cc
/trunk/src/ic/x87/ic-x87.cc
/trunk/src/isolate.cc
/trunk/src/json-parser.h
/trunk/src/lithium-codegen.cc
/trunk/src/lithium-codegen.h
/trunk/src/mips/code-stubs-mips.cc
/trunk/src/mips/full-codegen-mips.cc
/trunk/src/mips/lithium-codegen-mips.cc
/trunk/src/mips/lithium-codegen-mips.h
/trunk/src/mips/macro-assembler-mips.cc
/trunk/src/mips/macro-assembler-mips.h
/trunk/src/mips64/code-stubs-mips64.cc
/trunk/src/mips64/full-codegen-mips64.cc
/trunk/src/mips64/lithium-codegen-mips64.cc
/trunk/src/mips64/lithium-codegen-mips64.h
/trunk/src/mips64/macro-assembler-mips64.cc
/trunk/src/mips64/macro-assembler-mips64.h
/trunk/src/objects.h
/trunk/src/runtime.cc
/trunk/src/utils.h
/trunk/src/version.cc
/trunk/src/x64/code-stubs-x64.cc
/trunk/src/x64/lithium-codegen-x64.cc
/trunk/src/x64/lithium-codegen-x64.h
/trunk/src/x64/macro-assembler-x64.cc
/trunk/src/x64/macro-assembler-x64.h
/trunk/src/x87/assembler-x87-inl.h
/trunk/src/x87/assembler-x87.cc
/trunk/src/x87/assembler-x87.h
/trunk/src/x87/builtins-x87.cc
/trunk/src/x87/code-stubs-x87.cc
/trunk/src/x87/code-stubs-x87.h
/trunk/src/x87/codegen-x87.cc
/trunk/src/x87/deoptimizer-x87.cc
/trunk/src/x87/disasm-x87.cc
/trunk/src/x87/full-codegen-x87.cc
/trunk/src/x87/lithium-codegen-x87.cc
/trunk/src/x87/lithium-codegen-x87.h
/trunk/src/x87/lithium-gap-resolver-x87.cc
/trunk/src/x87/lithium-x87.cc
/trunk/src/x87/lithium-x87.h
/trunk/src/x87/macro-assembler-x87.cc
/trunk/src/x87/macro-assembler-x87.h
/trunk/test/cctest/cctest.status
/trunk/test/cctest/compiler/test-js-typed-lowering.cc
/trunk/test/cctest/compiler/test-run-machops.cc
/trunk/test/cctest/test-debug.cc
/trunk/test/cctest/test-disasm-x87.cc
/trunk/test/mjsunit/keyed-named-access.js
/trunk/test/mjsunit/regress/string-set-char-deopt.js
/trunk/testing/gtest-support.h
/trunk/tools/push-to-trunk/auto_push.py
/trunk/tools/push-to-trunk/auto_roll.py
/trunk/tools/push-to-trunk/auto_tag.py
/trunk/tools/push-to-trunk/bump_up_version.py
/trunk/tools/push-to-trunk/chromium_roll.py
/trunk/tools/push-to-trunk/common_includes.py
/trunk/tools/push-to-trunk/merge_to_branch.py
/trunk/tools/push-to-trunk/push_to_trunk.py
/trunk/tools/push-to-trunk/releases.py
/trunk/tools/push-to-trunk/test_scripts.py
=======================================
--- /dev/null
+++ /trunk/src/compiler/js-builtin-reducer-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public GraphTest {
+ public:
+ JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ Typer typer(zone());
+ MachineOperatorBuilder machine;
+ JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+ JSBuiltinReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+ NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+ return n;
+ }
+
+ Node* UndefinedConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+    Type::UnsignedSmall(), Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+ Type::OtherUnsigned32(), Type::OtherSigned32(), Type::SignedSmall(),
+ Type::Signed32(), Type::Unsigned32(), Type::Integral32(),
+ Type::MinusZero(), Type::NaN(), Type::OtherNumber(),
+ Type::OrderedNumber(), Type::Number()};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+  Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant());
+ Reduction r = Reduce(call);
+
+ EXPECT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ EXPECT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ Capture<Node*> branch;
+ EXPECT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(kMachNone, p1, p0,
+ IsMerge(IsIfTrue(CaptureEq(&branch)),
+ IsIfFalse(AllOf(CaptureEq(&branch),
+                            IsBranch(IsNumberLessThan(p0, p1),
+ graph()->start()))))));
+ } else {
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+ Handle<JSFunction> f(isolate()->context()->math_imul_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ EXPECT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+ } else {
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
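
Note on the test above: it relies on standard JavaScript identities and only expects
JSBuiltinReducer to rewrite a call when the operand types are precise enough. The
two-argument Math.max is lowered to a compare-and-select (a < b ? b : a) only when
both inputs are Integral32, since NaN and -0 would otherwise make that expansion
observable, and Math.imul is lowered to Int32Mul under the same condition. A small
sketch of the JavaScript identities being assumed (plain JS, for illustration only):

  Math.max() === -Infinity;        // MathMax0: no arguments
  Math.max(7) === 7;               // MathMax1: single numeric argument
  Math.max(3, 4) === 4;            // MathMax2: equivalent to 3 < 4 ? 4 : 3
  Math.imul(3, 4) === 12;          // MathImul: 32-bit multiply
  Math.imul(0x7fffffff, 2) === -2; // ... so it wraps around the int32 range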
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/regress-json-parse-index.js Wed Sep 24 00:05:07 2014 UTC
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = JSON.parse('{"\\u0030":100}');
+assertEquals(100, o[0]);
=======================================
--- /trunk/ChangeLog Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/ChangeLog Wed Sep 24 00:05:07 2014 UTC
@@ -1,3 +1,12 @@
+2014-09-24: Version 3.29.87
+
+ Preserve message when rethrowing exception (issue 3583).
+
+ Fix escaped index JSON parsing (Chromium issue 416449).
+
+ Performance and stability improvements on all platforms.
+
+
2014-09-23: Version 3.29.84
Performance and stability improvements on all platforms.
=======================================
--- /trunk/Makefile Fri Aug 8 15:46:17 2014 UTC
+++ /trunk/Makefile Wed Sep 24 00:05:07 2014 UTC
@@ -230,8 +230,8 @@
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
- build/toolchain.gypi samples/samples.gyp src/d8.gyp \
- test/cctest/cctest.gyp tools/gyp/v8.gyp
+                      build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
+ src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
=======================================
--- /trunk/PRESUBMIT.py Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/PRESUBMIT.py Wed Sep 24 00:05:07 2014 UTC
@@ -34,6 +34,32 @@
import sys
+_EXCLUDED_PATHS = (
+ r"^test[\\\/].*",
+ r"^testing[\\\/].*",
+ r"^third_party[\\\/].*",
+ r"^tools[\\\/].*",
+)
+
+
+# Regular expression that matches code only used for test binaries
+# (best effort).
+_TEST_CODE_EXCLUDED_PATHS = (
+ r'.+-unittest\.cc',
+ # Has a method VisitForTest().
+ r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
+ # Test extension.
+ r'src[\\\/]extensions[\\\/]gc-extension\.cc',
+)
+
+
+_TEST_ONLY_WARNING = (
+ 'You might be calling functions intended only for testing from\n'
+ 'production code. It is OK to ignore this warning if you know what\n'
+ 'you are doing, as the heuristics used to detect the situation are\n'
+ 'not perfect. The commit queue will not block on this warning.')
+
+
def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks."""
import sys
@@ -113,6 +139,49 @@
return results
+def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
+ """Attempts to prevent use of functions intended only for testing in
+ non-testing code. For now this is just a best-effort implementation
+ that ignores header files and may have some false positives. A
+ better implementation would probably need a proper C++ parser.
+ """
+ # We only scan .cc files, as the declaration of for-testing functions in
+  # header files are hard to distinguish from calls to such functions without a
+ # proper C++ parser.
+ file_inclusion_pattern = r'.+\.cc'
+
+ base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
+  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
+  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
+ exclusion_pattern = input_api.re.compile(
+ r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
+ base_function_pattern, base_function_pattern))
+
+ def FilterFile(affected_file):
+ black_list = (_EXCLUDED_PATHS +
+ _TEST_CODE_EXCLUDED_PATHS +
+ input_api.DEFAULT_BLACK_LIST)
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(file_inclusion_pattern, ),
+ black_list=black_list)
+
+ problems = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ local_path = f.LocalPath()
+ for line_number, line in f.ChangedContents():
+ if (inclusion_pattern.search(line) and
+ not comment_pattern.search(line) and
+ not exclusion_pattern.search(line)):
+ problems.append(
+ '%s:%d\n %s' % (local_path, line_number, line.strip()))
+
+ if problems:
+    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
+ else:
+ return []
+
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
@@ -122,6 +191,8 @@
input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
+ results.extend(
+ _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
return results
=======================================
--- /trunk/src/api.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/api.cc Wed Sep 24 00:05:07 2014 UTC
@@ -6567,9 +6567,6 @@
Isolate* Isolate::New(const Isolate::CreateParams& params) {
- // TODO(jochen): Remove again soon.
- V8::Initialize();
-
i::Isolate* isolate = new i::Isolate();
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
if (params.entry_hook) {
=======================================
--- /trunk/src/arm/code-stubs-arm.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm/code-stubs-arm.cc Wed Sep 24 00:05:07 2014 UTC
@@ -3465,8 +3465,8 @@
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss);
- __ JumpIfNotUniqueName(tmp2, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
// Unique names are compared by identity.
__ cmp(left, right);
@@ -3698,7 +3698,7 @@
    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -3868,7 +3868,7 @@
    __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
=======================================
--- /trunk/src/arm/lithium-codegen-arm.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm/lithium-codegen-arm.cc Wed Sep 24 00:05:07 2014 UTC
@@ -319,30 +319,26 @@
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
+ jump_table_.length() * 7)) {
Abort(kGeneratedCodeIsTooLarge);
}
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0].address;
+ Address base = jump_table_[0].address;
Register entry_offset = scratch0();
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
- Deoptimizer::BailoutType type = table_entry->bailout_type;
- DCHECK(type == deopt_jump_table_[0].bailout_type);
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- DeoptComment(table_entry->mnemonic, table_entry->reason);
+ DeoptComment(table_entry->reason);
      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
@@ -846,7 +842,7 @@
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* reason,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -899,35 +895,35 @@
__ stop("trap_on_deopt", condition);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(instr->Mnemonic(), reason);
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ b(condition, &deopt_jump_table_.last().label);
+ __ b(condition, &jump_table_.last().label);
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* reason) {
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, reason, bailout_type);
+ DeoptimizeIf(condition, instr, detail, bailout_type);
}
@@ -4976,26 +4972,22 @@
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "cannot truncate");
__ mov(input_reg, Operand::Zero());
} else {
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "not a heap number");
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- __ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
=======================================
--- /trunk/src/arm/lithium-codegen-arm.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm/lithium-codegen-arm.h Wed Sep 24 00:05:07 2014 UTC
@@ -26,7 +26,7 @@
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -235,9 +235,9 @@
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* reason, Deoptimizer::BailoutType bailout_type);
+                    const char* detail, Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -332,7 +332,7 @@
void EmitVectorLoadICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
=======================================
--- /trunk/src/arm/macro-assembler-arm.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.cc Wed Sep 24 00:05:07 2014 UTC
@@ -3199,8 +3199,8 @@
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                      Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
=======================================
--- /trunk/src/arm/macro-assembler-arm.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.h Wed Sep 24 00:05:07 2014 UTC
@@ -1340,7 +1340,7 @@
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register
scratch,
Label* failure);
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
Register index,
=======================================
--- /trunk/src/arm64/code-stubs-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/code-stubs-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -3370,8 +3370,8 @@
// To avoid a miss, each instance type should be either SYMBOL_TYPE or it
// should have kInternalizedTag set.
- __ JumpIfNotUniqueName(lhs_instance_type, &miss);
- __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
// Unique names are compared by identity.
STATIC_ASSERT(EQUAL == 0);
@@ -4488,7 +4488,7 @@
    __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ Ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ Bind(&good);
}
@@ -4575,7 +4575,7 @@
// Check if the entry name is not a unique name.
    __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
    __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
=======================================
--- /trunk/src/arm64/lithium-codegen-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/lithium-codegen-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -839,12 +839,8 @@
Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
__ Bind(&table_entry->label);
- Deoptimizer::BailoutType type = table_entry->bailout_type;
Address entry = table_entry->address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- DeoptComment(table_entry->mnemonic, table_entry->reason);
+ DeoptComment(table_entry->reason);
    // Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
@@ -993,7 +989,7 @@
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, const char* reason, BranchType branch_type,
+ LInstruction* instr, const char* detail, BranchType branch_type,
    Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1044,21 +1040,22 @@
__ Bind(&dont_trap);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(instr->Mnemonic(), reason);
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry* table_entry =
+        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() || (jump_table_.last()->address != entry) ||
- (jump_table_.last()->bailout_type != bailout_type) ||
- (jump_table_.last()->needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(
-            entry, instr->Mnemonic(), reason, bailout_type, !frame_is_built_);
+ if (jump_table_.is_empty() ||
+ !table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
__ B(&jump_table_.last()->label, branch_type, reg, bit);
@@ -1068,78 +1065,78 @@
void LCodeGen::Deoptimize(LInstruction* instr,
Deoptimizer::BailoutType* override_bailout_type,
- const char* reason) {
-  DeoptimizeBranch(instr, reason, always, NoReg, -1, override_bailout_type);
+ const char* detail) {
+  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* reason) {
- DeoptimizeBranch(instr, reason, static_cast<BranchType>(cond));
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- const char* reason) {
- DeoptimizeBranch(instr, reason, reg_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* reason) {
- DeoptimizeBranch(instr, reason, reg_not_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* reason) {
+ const char* detail) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, reason);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- const char* reason) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, reason);
+ const char* detail) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- const char* reason) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, reason);
+ const char* detail) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* reason) {
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, reason);
+ DeoptimizeIf(eq, instr, detail);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                         LInstruction* instr, const char* reason) {
+                         LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, reason);
+ DeoptimizeIf(ne, instr, detail);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* reason) {
+ const char* detail) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, reason);
+ DeoptimizeIf(vs, instr, detail);
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* reason) {
- DeoptimizeBranch(instr, reason, reg_bit_set, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* reason) {
- DeoptimizeBranch(instr, reason, reg_bit_clear, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}
@@ -5629,22 +5626,20 @@
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr);
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr,
+ "not a heap number");
// A heap number: load value and convert to int32 using non-truncating
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- __ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIfNegative(scratch1, instr);
+ DeoptimizeIfNegative(scratch1, instr, "minus zero");
}
}
__ Bind(&done);
=======================================
--- /trunk/src/arm64/lithium-codegen-arm64.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/lithium-codegen-arm64.h Wed Sep 24 00:05:07 2014 UTC
@@ -213,35 +213,35 @@
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(LInstruction* instr, const char* reason,
+ void DeoptimizeBranch(LInstruction* instr, const char* detail,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
                        Deoptimizer::BailoutType* override_bailout_type = NULL);
void Deoptimize(LInstruction* instr,
Deoptimizer::BailoutType* override_bailout_type = NULL,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfZero(Register rt, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfSmi(Register rt, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* reason = NULL);
+ LInstruction* instr, const char* detail = NULL);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* reason = NULL);
+ LInstruction* instr, const char* detail = NULL);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* reason = NULL);
+ const char* detail = NULL);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -2768,8 +2768,8 @@
}
-void MacroAssembler::JumpIfNotUniqueName(Register type,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+                                                      Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
// continue
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.h Wed Sep 24 00:05:07 2014 UTC
@@ -1074,7 +1074,7 @@
Register first_object_instance_type, Register
second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
=======================================
--- /trunk/src/arm64/simulator-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/arm64/simulator-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -1855,9 +1855,12 @@
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) {
fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
- fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n", stack);
- fprintf(stream_, " access was here: 0x%016" PRIx64 "\n", address);
-    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n", stack_limit_);
+ fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack));
+ fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(address));
+ fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack_limit_));
fprintf(stream_, "\n");
FATAL("ACCESS BELOW STACK POINTER");
}
=======================================
--- /trunk/src/base/macros.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/base/macros.h Wed Sep 24 00:05:07 2014 UTC
@@ -230,7 +230,7 @@
// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
// is likely to surprise you.
template <class Dest, class Source>
-inline Dest bit_cast(const Source& source) {
+V8_INLINE Dest bit_cast(Source const& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
Dest dest;
=======================================
--- /trunk/src/builtins.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/builtins.cc Wed Sep 24 00:05:07 2014 UTC
@@ -1287,11 +1287,6 @@
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
-
-
-static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateSloppyArguments(masm);
-}
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
=======================================
--- /trunk/src/builtins.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/builtins.h Wed Sep 24 00:05:07 2014 UTC
@@ -89,7 +89,6 @@
kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC,
kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC,
kNoExtraICState) \
- V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC,
kNoExtraICState) \
\
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC,
StoreIC::kStrictModeState) \
\
=======================================
--- /trunk/src/code-stubs-hydrogen.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/code-stubs-hydrogen.cc Wed Sep 24 00:05:07 2014 UTC
@@ -71,6 +71,8 @@
MULTIPLE
};
+ HValue* UnmappedCase(HValue* elements, HValue* key);
+
HValue* BuildArrayConstructor(ElementsKind kind,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class);
@@ -598,6 +600,122 @@
Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
+ HValue* result;
+ HInstruction* backing_store = Add<HLoadKeyed>(
+ elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
+ HValue* backing_store_length =
+ Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFixedArrayLength());
+ IfBuilder in_unmapped_range(this);
+ in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
+ Token::LT);
+ in_unmapped_range.Then();
+ {
+    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
+ FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
+ }
+ in_unmapped_range.ElseDeopt("Outside of range");
+ in_unmapped_range.End();
+ return result;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+
+  // Mapped arguments are actual arguments. Unmapped arguments are values added
+  // to the arguments object after it was created for the call. Mapped arguments
+  // are stored in the context at indexes given by elements[key + 2]. Unmapped
+  // arguments are stored as regular indexed properties in the arguments array,
+  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+ // look at argument object construction.
+ //
+ // The sloppy arguments elements array has a special format:
+ //
+ // 0: context
+ // 1: unmapped arguments array
+ // 2: mapped_index0,
+ // 3: mapped_index1,
+ // ...
+ //
+  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+ // If key + 2 >= elements.length then attempt to look in the unmapped
+  // arguments array (given by elements[1]) and return the value at key, missing
+  // to the runtime if the unmapped arguments array is not a fixed array or if
+ // key >= unmapped_arguments_array.length.
+ //
+  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+ // index into the context array given at elements[0]. Return the value at
+ // context[t].
+
+ key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
+ IfBuilder positive_smi(this);
+ positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
+ Token::LT);
+ positive_smi.ThenDeopt("key is negative");
+ positive_smi.End();
+
+ HValue* constant_two = Add<HConstant>(2);
+ HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
+ HValue* elements_length =
+ Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFixedArrayLength());
+  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
+ IfBuilder in_range(this);
+ in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
+ in_range.Then();
+ {
+ HValue* index = AddUncasted<HAdd>(key, constant_two);
+ HInstruction* mapped_index =
+ Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
+ FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ IfBuilder is_valid(this);
+ is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
+ graph()->GetConstantHole());
+ is_valid.Then();
+ {
+ // TODO(mvstanton): I'd like to assert from this point, that if the
+      // mapped_index is not the hole that it is indeed, a smi. An unnecessary
+ // smi check is being emitted.
+ HValue* the_context =
+ Add<HLoadKeyed>(elements, graph()->GetConstant0(),
+ static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
+ HValue* result =
+          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ environment()->Push(result);
+ }
+ is_valid.Else();
+ {
+ HValue* result = UnmappedCase(elements, key);
+ environment()->Push(result);
+ }
+ is_valid.End();
+ }
+ in_range.Else();
+ {
+ HValue* result = UnmappedCase(elements, key);
+ environment()->Push(result);
+ }
+ in_range.End();
+
+ return environment()->Pop();
+}
+
+
+Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
void CodeStubGraphBuilderBase::BuildStoreNamedField(
@@ -1092,7 +1210,6 @@
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub* stub = casted_stub();
- Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
@@ -1124,7 +1241,7 @@
// property has been deleted and that the store must be handled by the
// runtime.
IfBuilder builder(this);
- HValue* hole_value = Add<HConstant>(hole);
+ HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
builder.Deopt("Unexpected cell contents in global store");
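
Note on the stub above: the comment block in the new
KeyedLoadSloppyArgumentsStub::BuildCodeStub describes the two-part layout of a
sloppy arguments object's elements; roughly, indices below
min(actual, formal) are mapped through the context and alias the formal
parameters, while everything else is looked up in the unmapped backing array at
elements[1]. At the JavaScript level the distinction shows up as parameter
aliasing; a small sloppy-mode sketch (the function f is hypothetical, for
illustration only):

  function f(a, b) {
    arguments[0] = 42;         // mapped slot: writes through to the formal 'a'
    var aliased = (a === 42);
    var extra = arguments[2];  // index >= min(actual, formal): unmapped lookup
    return [aliased, extra];
  }
  f(1, 2, 3);  // -> [true, 3]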
=======================================
--- /trunk/src/code-stubs.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/code-stubs.cc Wed Sep 24 00:05:07 2014 UTC
@@ -586,12 +586,14 @@
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
if (kind() == Code::STORE_IC) {
descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+ } else if (kind() == Code::KEYED_LOAD_IC) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
}
}
CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
- if (kind() == Code::LOAD_IC) {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
return LoadDescriptor(isolate());
} else {
DCHECK_EQ(Code::STORE_IC, kind());
=======================================
--- /trunk/src/code-stubs.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/code-stubs.h Wed Sep 24 00:05:07 2014 UTC
@@ -82,6 +82,7 @@
/* IC Handler stubs */ \
V(LoadConstant) \
V(LoadField) \
+ V(KeyedLoadSloppyArguments) \
V(StoreField) \
V(StoreGlobal) \
V(StringLength)
@@ -914,6 +915,20 @@
};
+class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+ public:
+ explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
+ : HandlerStub(isolate) {}
+
+ protected:
+ virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+ virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ private:
+ DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
+};
+
+
class LoadConstantStub : public HandlerStub {
public:
LoadConstantStub(Isolate* isolate, int constant_index)
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Wed Sep 24 00:05:07 2014 UTC
@@ -136,13 +136,8 @@
ArmOperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
- case kArchCallAddress: {
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm(), i.InputRegister(0));
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -156,6 +151,7 @@
break;
}
case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -169,13 +165,6 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArchDrop: {
- int words = MiscField::decode(instr->opcode());
- __ Drop(words);
- DCHECK_LT(0, words);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
case kArchJmp:
__ b(code_->GetLabel(i.InputBlock(0)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -855,6 +844,27 @@
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
#undef __
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Wed Sep 24 00:05:07 2014 UTC
@@ -68,10 +68,8 @@
case kArmStrh:
return value >= -255 && value <= 255;
- case kArchCallAddress:
case kArchCallCodeObject:
case kArchCallJSFunction:
- case kArchDrop:
case kArchJmp:
case kArchNop:
case kArchRet:
@@ -803,9 +801,6 @@
opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArchCallAddress;
- break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -825,13 +820,6 @@
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (descriptor->kind() == CallDescriptor::kCallAddress &&
- !buffer.pushed_nodes.empty()) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
- }
}
=======================================
--- /trunk/src/compiler/arm64/code-generator-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/arm64/code-generator-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -131,12 +131,8 @@
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
- case kArchCallAddress: {
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm(), i.InputRegister(0));
- break;
- }
case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -149,6 +145,7 @@
break;
}
case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -163,11 +160,6 @@
AddSafepointAndDeopt(instr);
break;
}
- case kArchDrop: {
- int words = MiscField::decode(instr->opcode());
- __ Drop(words);
- break;
- }
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
@@ -853,6 +845,29 @@
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
#undef __
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -30,6 +30,26 @@
std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
return os << mi.constructor_name;
}
+
+
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+ int64_t value) {
+ switch (type) {
+ case kMachInt32:
+ return m.Int32Constant(value);
+ break;
+
+ case kMachInt64:
+ return m.Int64Constant(value);
+ break;
+
+ default:
+ UNIMPLEMENTED();
+ }
+ return NULL;
+}
// ARM64 logical instructions.
@@ -286,13 +306,13 @@
const MachineType type = dpi.machine_type;
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
StreamBuilder m(this, type, type);
- m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
- EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -304,7 +324,7 @@
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
StreamBuilder m(this, type, type);
- m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
Stream s = m.Build();
// Add can support an immediate on the left by commuting, but Sub can't
@@ -314,7 +334,7 @@
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
- EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -1004,38 +1024,35 @@
TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
const MachInst2 cmp = GetParam();
const MachineType type = cmp.machine_type;
- // TODO(all): Add support for testing 64-bit immediates.
- if (type == kMachInt32) {
- TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- // Compare with 0 are turned into tst instruction.
- if (imm == 0) continue;
- StreamBuilder m(this, type, type);
- m.Return((m.*cmp.constructor)(m.Parameter(0), m.Int32Constant(imm)));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
- EXPECT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- }
- TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- // Compare with 0 are turned into tst instruction.
- if (imm == 0) continue;
- StreamBuilder m(this, type, type);
- m.Return((m.*cmp.constructor)(m.Int32Constant(imm), m.Parameter(0)));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
- EXPECT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- }
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // Compare with 0 are turned into tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // Compare with 0 are turned into tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
}
}
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Wed Sep 24 00:05:07 2014 UTC
@@ -37,9 +37,13 @@
}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
- Int32Matcher m(node);
- if (!m.HasValue()) return false;
- int64_t value = m.Value();
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
unsigned ignored;
switch (mode) {
case kLogical32Imm:
@@ -107,11 +111,12 @@
// Shared routine for multiple binary operations.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Int32BinopMatcher m(node);
+ Matcher m(node);
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
@@ -142,10 +147,11 @@
// Shared routine for multiple binary operations.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, ImmediateMode operand_mode) {
FlagsContinuation cont;
- VisitBinop(selector, node, opcode, operand_mode, &cont);
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
@@ -262,22 +268,22 @@
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kArm64And32, kLogical32Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
}
void InstructionSelector::VisitWord64And(Node* node) {
- VisitBinop(this, node, kArm64And, kLogical64Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kArm64Or32, kLogical32Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
}
void InstructionSelector::VisitWord64Or(Node* node) {
- VisitBinop(this, node, kArm64Or, kLogical64Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
}
@@ -287,7 +293,7 @@
if (m.right().Is(-1)) {
    Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
- VisitBinop(this, node, kArm64Xor32, kLogical32Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
}
}
@@ -298,7 +304,7 @@
if (m.right().Is(-1)) {
    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
- VisitBinop(this, node, kArm64Xor, kLogical32Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical32Imm);
}
}
@@ -344,12 +350,12 @@
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop(this, node, kArm64Add32, kArithmeticImm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop(this, node, kArm64Add, kArithmeticImm);
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}
@@ -360,7 +366,7 @@
Emit(kArm64Neg32, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
- VisitBinop(this, node, kArm64Sub32, kArithmeticImm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
}
}
@@ -371,7 +377,7 @@
if (m.left().Is(0)) {
Emit(kArm64Neg, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
- VisitBinop(this, node, kArm64Sub, kArithmeticImm);
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
}
}
@@ -502,13 +508,13 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
- VisitBinop(this, node, kArm64Add32, kArithmeticImm, cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
- VisitBinop(this, node, kArm64Sub32, kArithmeticImm, cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}
@@ -624,12 +630,8 @@
InitializeCallBuffer(call, &buffer, true, false);
// Push the arguments to the stack.
- bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
int aligned_push_count = buffer.pushed_nodes.size();
- if (is_c_frame && pushed_count_uneven) {
- aligned_push_count++;
- }
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -644,8 +646,7 @@
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
- ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
- Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+ Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
slot--;
}
// Now all pushes can be done in pairs.
@@ -663,9 +664,6 @@
opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArchCallAddress;
- break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -685,12 +683,6 @@
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (is_c_frame && aligned_push_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL);
- }
}
} // namespace compiler
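
Note on the VisitBinop change above: the shared helper is now a template so one body serves both 32-bit and 64-bit operations, and callers name the matcher explicitly. A minimal self-contained sketch of that dispatch pattern (the matcher types below are illustrative stand-ins, not V8's real Int32BinopMatcher/Int64BinopMatcher):

    #include <cstdint>
    #include <iostream>

    // Stand-ins for the matcher classes selected by the template parameter.
    struct Int32MatcherSketch { using value_type = int32_t; static const char* name() { return "32-bit"; } };
    struct Int64MatcherSketch { using value_type = int64_t; static const char* name() { return "64-bit"; } };

    // One shared "visitor" body, parameterized on the matcher, as in the patch.
    template <typename Matcher>
    void VisitBinopSketch(typename Matcher::value_type left,
                          typename Matcher::value_type right) {
      std::cout << Matcher::name() << " binop: " << left << ", " << right << "\n";
    }

    int main() {
      VisitBinopSketch<Int32MatcherSketch>(1, 2);          // e.g. VisitWord32And
      VisitBinopSketch<Int64MatcherSketch>(1LL << 40, 2);  // e.g. VisitWord64And
    }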
=======================================
--- /trunk/src/compiler/code-generator.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/code-generator.cc Wed Sep 24 00:05:07 2014 UTC
@@ -21,7 +21,8 @@
safepoints_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
- translations_(code->zone()) {}
+ translations_(code->zone()),
+ last_lazy_deopt_pc_(0) {}
Handle<Code> CodeGenerator::GenerateCode() {
@@ -242,6 +243,7 @@
}
if (needs_frame_state) {
+ MarkLazyDeoptSite();
// If the frame state is present, it starts at argument 1
// (just after the code address).
InstructionOperandConverter converter(this, instr);
@@ -387,8 +389,7 @@
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
case Constant::kFloat64:
- constant_object =
- isolate()->factory()->NewHeapNumber(constant.ToFloat64());
+ constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
constant_object = constant.ToHeapObject();
@@ -402,6 +403,11 @@
UNREACHABLE();
}
}
+
+
+void CodeGenerator::MarkLazyDeoptSite() {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
#if !V8_TURBOFAN_BACKEND
=======================================
--- /trunk/src/compiler/code-generator.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/code-generator.h Wed Sep 24 00:05:07 2014 UTC
@@ -98,8 +98,10 @@
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op);
void AddNopForSmiCodeInlining();
+ void EnsureSpaceForLazyDeopt();
+ void MarkLazyDeoptSite();
+
// ===========================================================================
-
struct DeoptimizationState : ZoneObject {
public:
BailoutId bailout_id() const { return bailout_id_; }
@@ -126,6 +128,7 @@
ZoneDeque<DeoptimizationState*> deoptimization_states_;
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
+ int last_lazy_deopt_pc_;
};
} // namespace compiler
=======================================
--- /trunk/src/compiler/common-operator-unittest.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/common-operator-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -4,6 +4,8 @@
#include "src/compiler/common-operator.h"
+#include <limits>
+
#include "src/compiler/operator-properties-inl.h"
#include "src/test/test-utils.h"
@@ -132,9 +134,26 @@
const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
+const float kFloat32Values[] = {
+ std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
+ std::numeric_limits<float>::max()};
+
} // namespace
+TEST_F(CommonOperatorTest, Float32Constant) {
+ TRACED_FOREACH(float, value, kFloat32Values) {
+ const Operator* op = common()->Float32Constant(value);
+ EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
+ EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ }
+}
+
+
TEST_F(CommonOperatorTest, ValueEffect) {
TRACED_FOREACH(int, arguments, kArguments) {
const Operator* op = common()->ValueEffect(arguments);
=======================================
--- /trunk/src/compiler/common-operator.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/common-operator.cc Wed Sep 24 00:05:07 2014 UTC
@@ -135,6 +135,13 @@
return new (zone()) Operator1<int64_t>(
IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant",
value);
}
+
+
+const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
+ return new (zone())
+ Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
+ "Float32Constant", value);
+}
const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
=======================================
--- /trunk/src/compiler/common-operator.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/common-operator.h Wed Sep 24 00:05:07 2014 UTC
@@ -84,6 +84,7 @@
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
+ const Operator* Float32Constant(volatile float);
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
const Operator* NumberConstant(volatile double);
=======================================
--- /trunk/src/compiler/compiler.gyp Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/compiler/compiler.gyp Wed Sep 24 00:05:07 2014 UTC
@@ -26,6 +26,7 @@
'graph-unittest.h',
'instruction-selector-unittest.cc',
'instruction-selector-unittest.h',
+ 'js-builtin-reducer-unittest.cc',
'machine-operator-reducer-unittest.cc',
'machine-operator-unittest.cc',
'simplified-operator-reducer-unittest.cc',
=======================================
--- /trunk/src/compiler/graph-unittest.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/graph-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -44,7 +44,12 @@
}
-Node* GraphTest::Float64Constant(double value) {
+Node* GraphTest::Float32Constant(volatile float value) {
+ return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
return graph()->NewNode(common()->Float64Constant(value));
}
@@ -59,7 +64,7 @@
}
-Node* GraphTest::NumberConstant(double value) {
+Node* GraphTest::NumberConstant(volatile double value) {
return graph()->NewNode(common()->NumberConstant(value));
}
@@ -662,6 +667,12 @@
return MakeMatcher(
new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
}
+
+
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
@@ -732,6 +743,7 @@
return MakeMatcher( \
new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
}
+IS_BINOP_MATCHER(NumberLessThan)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Sar)
IS_BINOP_MATCHER(Word32Shl)
@@ -742,6 +754,7 @@
IS_BINOP_MATCHER(Word64Shl)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Mul)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
#undef IS_BINOP_MATCHER
=======================================
--- /trunk/src/compiler/graph-unittest.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/graph-unittest.h Wed Sep 24 00:05:07 2014 UTC
@@ -31,10 +31,11 @@
protected:
Node* Parameter(int32_t index);
- Node* Float64Constant(double value);
+ Node* Float32Constant(volatile float value);
+ Node* Float64Constant(volatile double value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
- Node* NumberConstant(double value);
+ Node* NumberConstant(volatile double value);
Node* HeapConstant(const Unique<HeapObject>& value);
Node* FalseConstant();
Node* TrueConstant();
@@ -65,6 +66,7 @@
const Matcher<ExternalReference>& value_matcher);
Matcher<Node*> IsHeapConstant(
const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
@@ -83,6 +85,9 @@
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -114,6 +119,8 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Wed Sep 24 00:05:07 2014 UTC
@@ -111,16 +111,8 @@
IA32OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
- case kArchCallAddress:
- if (HasImmediateInput(instr, 0)) {
- // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
- __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
- RelocInfo::RUNTIME_ENTRY);
- } else {
- __ call(i.InputRegister(0));
- }
- break;
case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ call(code, RelocInfo::CODE_TARGET);
@@ -132,6 +124,7 @@
break;
}
case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -142,11 +135,6 @@
AddSafepointAndDeopt(instr);
break;
}
- case kArchDrop: {
- int words = MiscField::decode(instr->opcode());
- __ add(esp, Immediate(kPointerSize * words));
- break;
- }
case kArchJmp:
__ jmp(code()->GetLabel(i.InputBlock(0)));
break;
@@ -945,6 +933,21 @@
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
+ }
+ MarkLazyDeoptSite();
+}
#undef __
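
EnsureSpaceForLazyDeopt above pads with nops so there is always room to patch the most recent lazy-deopt site. The arithmetic is easy to check in isolation; a standalone sketch of the same rule (the offsets and patch size below are made-up numbers, not Deoptimizer::patch_size()):

    #include <algorithm>
    #include <cassert>

    // Padding rule: keep at least space_needed bytes between the last
    // lazy-deopt site and the current pc so the call there can be patched.
    int PaddingNeeded(int current_pc, int last_lazy_deopt_pc, int space_needed) {
      return std::max(0, last_lazy_deopt_pc + space_needed - current_pc);
    }

    int main() {
      assert(PaddingNeeded(12, 10, 5) == 3);  // too close: emit 3 bytes of nops
      assert(PaddingNeeded(20, 10, 5) == 0);  // far enough: no padding
    }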
=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Wed Sep 24 00:05:07 2014 UTC
@@ -531,9 +531,6 @@
opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArchCallAddress;
- break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -553,13 +550,6 @@
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_nodes.size() > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
- }
}
} // namespace compiler
=======================================
--- /trunk/src/compiler/instruction-codes.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/instruction-codes.h Wed Sep 24 00:05:07 2014 UTC
@@ -29,10 +29,8 @@
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define ARCH_OPCODE_LIST(V) \
- V(ArchCallAddress) \
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
- V(ArchDrop) \
V(ArchJmp) \
V(ArchNop) \
V(ArchRet) \
=======================================
--- /trunk/src/compiler/instruction-selector-unittest.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/instruction-selector-unittest.h Wed Sep 24 00:05:07 2014 UTC
@@ -146,6 +146,10 @@
int32_t ToInt32(const InstructionOperand* operand) const {
return ToConstant(operand).ToInt32();
}
+
+ int64_t ToInt64(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToInt64();
+ }
int ToVreg(const InstructionOperand* operand) const {
if (operand->IsConstant()) return operand->index();
=======================================
--- /trunk/src/compiler/js-builtin-reducer.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.cc Wed Sep 24 00:05:07 2014 UTC
@@ -34,34 +34,49 @@
// constant callee being a well-known builtin with a BuiltinFunctionId.
bool HasBuiltinFunctionId() {
if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
- HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
- return m.HasValue() && m.Value().handle()->shared()->HasBuiltinFunctionId();
+ HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ return function->shared()->HasBuiltinFunctionId();
}
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
- HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
- return m.Value().handle()->shared()->builtin_function_id();
+ HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ return function->shared()->builtin_function_id();
}
+
+ // Determines whether the call takes zero inputs.
+ bool InputsMatchZero() { return GetJSCallArity() == 0; }
// Determines whether the call takes one input of the given type.
- bool InputsMatch(Type* t1) {
+ bool InputsMatchOne(Type* t1) {
return GetJSCallArity() == 1 &&
NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
}
// Determines whether the call takes two inputs of the given types.
- bool InputsMatch(Type* t1, Type* t2) {
+ bool InputsMatchTwo(Type* t1, Type* t2) {
return GetJSCallArity() == 2 &&
NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
}
+
+ // Determines whether the call takes inputs all of the given type.
+ bool InputsMatchAll(Type* t) {
+ for (int i = 0; i < GetJSCallArity(); i++) {
+ if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+ return false;
+ }
+ }
+ return true;
+ }
Node* left() { return GetJSCallInput(0); }
Node* right() { return GetJSCallInput(1); }
- protected:
int GetJSCallArity() {
DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
// Skip first (i.e. callee) and second (i.e. receiver) operand.
@@ -78,12 +93,44 @@
private:
Node* node_;
};
+
+
+// ECMA-262, section 15.8.2.11.
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchZero()) {
+ // Math.max() -> -Infinity
+ return Replace(jsgraph()->Constant(-V8_INFINITY));
+ }
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.max(a:number) -> a
+ return Replace(r.left());
+ }
+ if (r.InputsMatchAll(Type::Integral32())) {
+ // Math.max(a:int32, b:int32, ...)
+ Node* value = r.GetJSCallInput(0);
+ for (int i = 1; i < r.GetJSCallArity(); i++) {
+ Node* p = r.GetJSCallInput(i);
+ Node* control = graph()->start();
+ Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+
+ Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+ value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+ }
+ return Replace(value);
+ }
+ return NoChange();
+}
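
The graph built for the Integral32 case above is a chain of NumberLessThan/Branch/Phi nodes that keeps the larger value at each step. A scalar sketch of the same logic (a standalone illustration, not the reducer itself; the helper name is invented):

    #include <cassert>
    #include <vector>

    // Scalar equivalent of the branch-and-phi chain: start from the first
    // argument and keep the larger value at each step.
    int MathMaxInt32(const std::vector<int>& args) {
      int value = args[0];
      for (size_t i = 1; i < args.size(); ++i) {
        int p = args[i];
        value = (value < p) ? p : value;  // tag = NumberLessThan(value, p); Phi picks p or value
      }
      return value;
    }

    int main() {
      assert(MathMaxInt32({3, 7, 5}) == 7);
      assert(MathMaxInt32({-2}) == -2);
    }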
// ES6 draft 08-24-14, section 20.2.2.19.
Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatch(Type::Integral32(), Type::Integral32())) {
+ if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
// Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
return Replace(value);
@@ -98,6 +145,8 @@
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
+ case kMathMax:
+ return ReplaceWithPureReduction(node, ReduceMathMax(node));
case kMathImul:
return ReplaceWithPureReduction(node, ReduceMathImul(node));
default:
=======================================
--- /trunk/src/compiler/js-builtin-reducer.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.h Wed Sep 24 00:05:07 2014 UTC
@@ -24,11 +24,13 @@
virtual Reduction Reduce(Node* node) OVERRIDE;
private:
- Graph* graph() { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
JSGraph* jsgraph_;
=======================================
--- /trunk/src/compiler/js-graph.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/js-graph.h Wed Sep 24 00:05:07 2014 UTC
@@ -65,6 +65,9 @@
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
+ Node* Uint32Constant(uint32_t value) {
+ return Int32Constant(bit_cast<int32_t>(value));
+ }
// Creates a Float64Constant node, usually canonicalized.
Node* Float64Constant(double value);
@@ -109,6 +112,7 @@
Factory* factory() { return isolate()->factory(); }
};
+
} // namespace compiler
} // namespace internal
} // namespace v8
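
The new Uint32Constant helper above just reuses the canonical Int32Constant node for the same 32-bit pattern. A self-contained illustration of the reinterpretation it relies on (memcpy stands in for V8's bit_cast; the helper name is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Reinterpret a uint32_t as an int32_t without changing any bits.
    int32_t AsInt32Bits(uint32_t value) {
      int32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }

    int main() {
      assert(AsInt32Bits(0u) == 0);
      assert(AsInt32Bits(0xFFFFFFFFu) == -1);  // same 32 bits, signed view
    }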
=======================================
--- /trunk/src/compiler/js-typed-lowering.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/js-typed-lowering.cc Wed Sep 24 00:05:07 2014 UTC
@@ -571,13 +571,14 @@
// TODO(mstarzinger): This lowering is not correct if:
// a) The typed array turns external (i.e. MaterializeArrayBuffer)
// b) The typed array or its buffer is neutered.
- // c) The index is out of bounds
if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
base_type->AsConstant()->Value()->IsJSTypedArray()) {
// JSStoreProperty(typed-array, int32, value)
JSTypedArray* array =
JSTypedArray::cast(*base_type->AsConstant()->Value());
ElementsKind elements_kind = array->map()->elements_kind();
ExternalArrayType type = array->type();
+ uint32_t length;
+ CHECK(array->length()->ToUint32(&length));
ElementAccess element_access;
Node* elements = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
@@ -591,11 +592,24 @@
DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
element_access = AccessBuilder::ForTypedArrayElement(type, false);
}
- Node* store =
- graph()->NewNode(simplified()->StoreElement(element_access), elements,
- key, value, NodeProperties::GetEffectInput(node),
- NodeProperties::GetControlInput(node));
- return ReplaceEagerly(node, store);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
+ jsgraph()->Uint32Constant(length));
+ Node* branch = graph()->NewNode(common()->Branch(), check,
+ NodeProperties::GetControlInput(node));
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* store = graph()->NewNode(
+ simplified()->StoreElement(element_access), elements, key, value,
+ NodeProperties::GetEffectInput(node), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
+ NodeProperties::GetEffectInput(node), merge);
+
+ return ReplaceWith(phi);
}
return NoChange();
}
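
The lowering above wraps the typed-array store in a bounds check: the store only happens on the in-bounds branch, and the effects of both branches are merged through an EffectPhi. A scalar sketch of the resulting behaviour (an illustrative helper, not the lowering code):

    #include <cstdint>
    #include <vector>

    // Out-of-bounds stores to a typed array are silently dropped.
    void GuardedStore(std::vector<uint8_t>& elements, uint32_t key, uint8_t value) {
      uint32_t length = static_cast<uint32_t>(elements.size());
      if (key < length) {        // Uint32LessThan(key, length) -> Branch
        elements[key] = value;   // StoreElement on the IfTrue path
      }                          // IfFalse path: no store; both paths Merge
    }

    int main() {
      std::vector<uint8_t> buf(4, 0);
      GuardedStore(buf, 2, 7);    // in bounds: written
      GuardedStore(buf, 100, 7);  // out of bounds: ignored
      return buf[2] == 7 ? 0 : 1;
    }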
=======================================
--- /trunk/src/compiler/machine-operator-reducer-unittest.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -46,6 +46,43 @@
namespace {
+static const float kFloat32Values[] = {
+ -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+ -1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
+ -1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
+ -1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
+ -2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
+ -4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
+ -1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
+ -1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
+ -4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
+ -964300.0f, -192446.0f, -28455.0f,
+ -27194.0f, -26401.0f, -20575.0f,
+ -17069.0f, -9167.0f, -960.178f,
+ -113.0f, -62.0f, -15.0f,
+ -7.0f, -0.0256635f, -4.60374e-07f,
+ -3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
+ -1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
+ -1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
+ -5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
+ -1.27126e-38f, -0.0f, 0.0f,
+ 1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
+ 3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
+ 1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
+ 5.57888e-07f, 4.89988e-05f, 0.244326f,
+ 12.4895f, 19.0f, 47.0f,
+ 106.0f, 538.324f, 564.536f,
+ 819.124f, 7048.0f, 12611.0f,
+ 19878.0f, 20309.0f, 797056.0f,
+ 1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
+ 3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
+ 1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
+ 1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
+ 2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
+ 1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
+ std::numeric_limits<float>::infinity()};
+
+
static const double kFloat64Values[] = {
-V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
-2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
@@ -165,7 +202,7 @@
namespace {
struct UnaryOperator {
- const Operator* (MachineOperatorBuilder::*constructor)() const;
+ const Operator* (MachineOperatorBuilder::*constructor)();
const char* constructor_name;
};
@@ -203,6 +240,20 @@
INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
MachineUnaryOperatorReducerTest,
::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+ }
+}
// -----------------------------------------------------------------------------
@@ -312,6 +363,31 @@
IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
}
}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+ TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+ }
+}
// -----------------------------------------------------------------------------
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc Wed Sep 24 00:05:07 2014 UTC
@@ -19,6 +19,11 @@
MachineOperatorReducer::~MachineOperatorReducer() {}
+
+
+Node* MachineOperatorReducer::Float32Constant(volatile float value) {
+ return graph()->NewNode(common()->Float32Constant(value));
+}
Node* MachineOperatorReducer::Float64Constant(volatile double value) {
@@ -383,6 +388,11 @@
}
break;
}
+ case IrOpcode::kChangeFloat32ToFloat64: {
+ Float32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(m.Value());
+ break;
+ }
case IrOpcode::kChangeFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
@@ -427,6 +437,12 @@
if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kTruncateFloat64ToFloat32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
+ if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
// TODO(turbofan): strength-reduce and fold floating point operations.
default:
break;
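
The new TruncateFloat64ToFloat32 folds above rely on float-to-double widening being exact, so widen-then-narrow gives back the original float and the pair of conversions cancels out. A standalone check of that identity (ordinary casts stand in for the Change/Truncate operators):

    #include <cassert>

    int main() {
      const float inputs[] = {-0.0f, 0.0f, 1.5f, 3.0e38f};
      for (float x : inputs) {
        double widened = static_cast<double>(x);       // ChangeFloat32ToFloat64
        float narrowed = static_cast<float>(widened);  // TruncateFloat64ToFloat32
        assert(narrowed == x);                         // round trip is exact
      }
      return 0;
    }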
=======================================
--- /trunk/src/compiler/machine-operator-reducer.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.h Wed Sep 24 00:05:07 2014 UTC
@@ -27,11 +27,15 @@
virtual Reduction Reduce(Node* node) OVERRIDE;
private:
+ Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+ Reduction ReplaceFloat32(volatile float value) {
+ return Replace(Float32Constant(value));
+ }
Reduction ReplaceFloat64(volatile double value) {
return Replace(Float64Constant(value));
}
=======================================
--- /trunk/src/compiler/machine-operator-unittest.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator-unittest.cc Wed Sep 24 00:05:07 2014 UTC
@@ -169,7 +169,7 @@
namespace {
struct PureOperator {
- const Operator* (MachineOperatorBuilder::*constructor)() const;
+ const Operator* (MachineOperatorBuilder::*constructor)();
IrOpcode::Value opcode;
int value_input_count;
int value_output_count;
@@ -187,32 +187,33 @@
&MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
output_count \
}
- PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1),
- PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1),
- PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1),
- PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1),
- PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1),
- PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1),
- PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1),
- PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1),
- PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2),
- PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2),
- PURE(Int32Mul, 2, 1), PURE(Int32Div, 2, 1),
- PURE(Int32UDiv, 2, 1), PURE(Int32Mod, 2, 1),
- PURE(Int32UMod, 2, 1), PURE(Int32LessThan, 2, 1),
- PURE(Int32LessThanOrEqual, 2, 1), PURE(Uint32LessThan, 2, 1),
- PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1),
- PURE(Int64Sub, 2, 1), PURE(Int64Mul, 2, 1),
- PURE(Int64Div, 2, 1), PURE(Int64UDiv, 2, 1),
- PURE(Int64Mod, 2, 1), PURE(Int64UMod, 2, 1),
- PURE(Int64LessThan, 2, 1), PURE(Int64LessThanOrEqual, 2, 1),
- PURE(ChangeFloat64ToInt32, 1, 1), PURE(ChangeFloat64ToUint32, 1, 1),
- PURE(ChangeInt32ToInt64, 1, 1), PURE(ChangeUint32ToFloat64, 1, 1),
- PURE(ChangeUint32ToUint64, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
- PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1),
- PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1),
- PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
- PURE(Float64Equal, 2, 1), PURE(Float64LessThan, 2, 1),
+ PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1),
+ PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1),
+ PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1),
+ PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1),
+ PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1),
+ PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1),
+ PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1),
+ PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1),
+ PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2),
+ PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2),
+ PURE(Int32Mul, 2, 1), PURE(Int32Div, 2, 1),
+ PURE(Int32UDiv, 2, 1), PURE(Int32Mod, 2, 1),
+ PURE(Int32UMod, 2, 1), PURE(Int32LessThan, 2, 1),
+ PURE(Int32LessThanOrEqual, 2, 1), PURE(Uint32LessThan, 2, 1),
+ PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1),
+ PURE(Int64Sub, 2, 1), PURE(Int64Mul, 2, 1),
+ PURE(Int64Div, 2, 1), PURE(Int64UDiv, 2, 1),
+ PURE(Int64Mod, 2, 1), PURE(Int64UMod, 2, 1),
+ PURE(Int64LessThan, 2, 1), PURE(Int64LessThanOrEqual, 2, 1),
+ PURE(ChangeFloat32ToFloat64, 1, 1), PURE(ChangeFloat64ToInt32, 1, 1),
+ PURE(ChangeFloat64ToUint32, 1, 1), PURE(ChangeInt32ToInt64, 1, 1),
+ PURE(ChangeUint32ToFloat64, 1, 1), PURE(ChangeUint32ToUint64, 1, 1),
+ PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
+ PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1),
+ PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1),
+ PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
+ PURE(Float64Equal, 2, 1), PURE(Float64LessThan, 2, 1),
PURE(Float64LessThanOrEqual, 2, 1)
#undef PURE
};
=======================================
--- /trunk/src/compiler/machine-operator.cc Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator.cc Wed Sep 24 00:05:07 2014 UTC
@@ -97,12 +97,14 @@
V(Int64UMod, Operator::kNoProperties, 2, 1) \
V(Int64LessThan, Operator::kNoProperties, 2, 1) \
V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1) \
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1) \
+ V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1) \
V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1) \
V(Float64Add, Operator::kCommutative, 2, 1) \
@@ -194,14 +196,12 @@
#define PURE(Name, properties, input_count, output_count) \
- const Operator* MachineOperatorBuilder::Name() const { \
- return &impl_.k##Name; \
- }
+ const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
PURE_OP_LIST(PURE)
#undef PURE
-const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) const {
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
switch (rep) {
#define LOAD(Type) \
case k##Type: \
@@ -217,7 +217,7 @@
}
-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) const {
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
switch (rep.machine_type()) {
#define STORE(Type) \
case k##Type: \
=======================================
--- /trunk/src/compiler/machine-operator.h Tue Sep 23 12:44:49 2014 UTC
+++ /trunk/src/compiler/machine-operator.h Wed Sep 24 00:05:07 2014 UTC
@@ -62,84 +62,84 @@
public:
explicit MachineOperatorBuilder(MachineType word = kMachPtr);
- const Operator* Word32And() const WARN_UNUSED_RESULT;
- const Operator* Word32Or() const WARN_UNUSED_RESULT;
- const Operator* Word32Xor() const WARN_UNUSED_RESULT;
- const Operator* Word32Shl() const WARN_UNUSED_RESULT;
- const Operator* Word32Shr() const WARN_UNUSED_RESULT;
- const Operator* Word32Sar() const WARN_UNUSED_RESULT;
- const Operator* Word32Ror() const WARN_UNUSED_RESULT;
- const Operator* Word32Equal() const WARN_UNUSED_RESULT;
+ const Operator* Word32And();
+ const Operator* Word32Or();
+ const Operator* Word32Xor();
+ const Operator* Word32Shl();
+ const Operator* Word32Shr();
+ const Operator* Word32Sar();
+ const Operator* Word32Ror();
+ const Operator* Word32Equal();
- const Operator* Word64And() const WARN_UNUSED_RESULT;
- const Operator* Word64Or() const WARN_UNUSED_RESULT;
- const Operator* Word64Xor() const WARN_UNUSED_RESULT;
- const Operator* Word64Shl() const WARN_UNUSED_RESULT;
- const Operator* Word64Shr() const WARN_UNUSED_RESULT;
- const Operator* Word64Sar() const WARN_UNUSED_RESULT;
- const Operator* Word64Ror() const WARN_UNUSED_RESULT;
- const Operator* Word64Equal() const WARN_UNUSED_RESULT;
+ const Operator* Word64And();
+ const Operator* Word64Or();
+ const Operator* Word64Xor();
+ const Operator* Word64Shl();
+ const Operator* Word64Shr();
+ const Operator* Word64Sar();
+ const Operator* Word64Ror();
+ const Operator* Word64Equal();
- const Operator* Int32Add() const WARN_UNUSED_RESULT;
- const Operator* Int32AddWithOverflow() const WARN_UNUSED_RESULT;
- const Operator* Int32Sub() const WARN_UNUSED_RESULT;
- const Operator* Int32SubWithOverflow() const WARN_UNUSED_RESULT;
- const Operator* Int32Mul() const WARN_UNUSED_RESULT;
- const Operator* Int32Div() const WARN_UNUSED_RESULT;
- const Operator* Int32UDiv() const WARN_UNUSED_RESULT;
- const Operator* Int32Mod() const WARN_UNUSED_RESULT;
- const Operator* Int32UMod() const WARN_UNUSED_RESULT;
- const Operator* Int32LessThan() const WARN_UNUSED_RESULT;
- const Operator* Int32LessThanOrEqual() const WARN_UNUSED_RESULT;
- const Operator* Uint32LessThan() const WARN_UNUSED_RESULT;
- const Operator* Uint32LessThanOrEqual() const WARN_UNUSED_RESULT;
-
- const Operator* Int64Add() const WARN_UNUSED_RESULT;
- const Operator* Int64Sub() const WARN_UNUSED_RESULT;
- const Operator* Int64Mul() const WARN_UNUSED_RESULT;
- const Operator* Int64Div() const WARN_UNUSED_RESULT;
- const Operator* Int64UDiv() const WARN_UNUSED_RESULT;
- const Operator* Int64Mod() const WARN_UNUSED_RESULT;
- const Operator* Int64UMod() const WARN_UNUSED_RESULT;
- const Operator* Int64LessThan() const WARN_UNUSED_RESULT;
- const Operator* Int64LessThanOrEqual() const WARN_UNUSED_RESULT;
-
- // Convert representation of integers between float64 and int32/uint32.
- // The precise rounding mode and handling of out of range inputs are *not*
- // defined for these operators, since they are intended only for use with
- // integers.
- const Operator* ChangeInt32ToFloat64() const WARN_UNUSED_RESULT;
- const Operator* ChangeUint32ToFloat64() const WARN_UNUSED_RESULT;
- const Operator* ChangeFloat64ToInt32() const WARN_UNUSED_RESULT;
- const Operator* ChangeFloat64ToUint32() const WARN_UNUSED_RESULT;
+ const Operator* Int32Add();
+ const Operator* Int32AddWithOverflow();
+ const Operator* Int32Sub();
+ const Operator* Int32SubWithOverflow();
+ const Operator* Int32Mul();
+ const Operator* Int32Div();
+ const Operator* Int32UDiv();
+ const Operator* Int32Mod();
+ const Operator* Int32UMod();
+ const Operator* Int32LessThan();
+ const Operator* Int32LessThanOrEqual();
+ const Operator* Uint32LessThan();
+ const Operator* Uint32LessThanOrEqual();
- // Sign/zero extend int32/uint32 to int64/uint64.
- const Operator* ChangeInt32ToInt64() const WARN_UNUSED_RESULT;
- const Operator* ChangeUint32ToUint64() const WARN_UNUSED_RESULT;
+ const Operator* Int64Add();
+ const Operator* Int64Sub();
+ const Operator* Int64Mul();
+ const Operator* Int64Div();
+ const Operator* Int64UDiv();
+ const Operator* Int64Mod();
+ const Operator* Int64UMod();
+ const Operator* Int64LessThan();
+ const Operator* Int64LessThanOrEqual();
- // Truncate double to int32 using JavaScript semantics.
- const Operator* TruncateFloat64ToInt32() const WARN_UNUSED_RESULT;
+ // These operators change the representation of numbers while preserving the
+ // value of the number. Narrowing operators assume the input is representable
+ // in the target type and are *not* defined for other inputs.
+ // Use narrowing change operators only when there is a static guarantee that
+ // the input value is representable in the target value.
+ const Operator* ChangeFloat32ToFloat64();
+ const Operator* ChangeFloat64ToInt32(); // narrowing
+ const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* ChangeInt32ToFloat64();
+ const Operator* ChangeInt32ToInt64();
+ const Operator* ChangeUint32ToFloat64();
+ const Operator* ChangeUint32ToUint64();
- // Truncate the high order bits and convert the remaining bits to int32.
- const Operator* TruncateInt64ToInt32() const WARN_UNUSED_RESULT;
+ // These operators truncate numbers, both changing the representation of
+ // the number and mapping multiple input values onto the same output value.
+ const Operator* TruncateFloat64ToFloat32();
+ const Operator* TruncateFloat64ToInt32(); // JavaScript semantics.
+ const Operator* TruncateInt64ToInt32();
// Floating point operators always operate with IEEE 754 round-to-nearest.
- const Operator* Float64Add() const WARN_UNUSED_RESULT;
- const Operator* Float64Sub() const WARN_UNUSED_RESULT;
- const Operator* Float64Mul() const WARN_UNUSED_RESULT;
- const Operator* Float64Div() const WARN_UNUSED_RESULT;
- const Operator* Float64Mod() const WARN_UNUSED_RESULT;
+ const Operator* Float64Add();
+ const Operator* Float64Sub();
+ const Operator* Float64Mul();
+ const Operator* Float64Div();
+ const Operator* Float64Mod();
// Floating point comparisons complying to IEEE 754.
- const Operator* Float64Equal() const WARN_UNUSED_RESULT;
- const Operator* Float64LessThan() const WARN_UNUSED_RESULT;
- const Operator* Float64LessThanOrEqual() const WARN_UNUSED_RESULT;
+ const Operator* Float64Equal();
+ const Operator* Float64LessThan();
+ const Operator* Float64LessThanOrEqual();
// load [base + index]
- const Operator* Load(LoadRepresentation rep) const WARN_UNUSED_RESULT;
+ const Operator* Load(LoadRepresentation rep);
// store [base + index], value
- const Operator* Store(StoreRepresentation rep) const WARN_UNUSED_RESULT;
+ const Operator* Store(StoreRepresentation rep);
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == kRepWord32; }
@@ -167,7 +167,7 @@
V(Int, LessThan) \
V(Int, LessThanOrEqual)
#define PSEUDO_OP(Prefix, Suffix) \
- const Operator* Prefix##Suffix() const { \
+ const Operator* Prefix##Suffix() { \
return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
}
PSEUDO_OP_LIST(PSEUDO_OP)
=======================================
***Additional files exist in this changeset.***