Revision: 22785
Author: [email protected]
Date: Fri Aug 1 10:40:37 2014 UTC
Log: Version 3.28.53 (based on bleeding_edge revision r22782)
Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=22785
Added:
/trunk/test/cctest/compiler/test-changes-lowering.cc
Modified:
/trunk/ChangeLog
/trunk/build/toolchain.gypi
/trunk/src/compiler/simplified-lowering.cc
/trunk/src/compiler/simplified-lowering.h
/trunk/src/gc-tracer.cc
/trunk/src/gc-tracer.h
/trunk/src/heap.cc
/trunk/src/heap.h
/trunk/src/incremental-marking.cc
/trunk/src/incremental-marking.h
/trunk/src/mark-compact.cc
/trunk/src/version.cc
/trunk/src/x87/assembler-x87-inl.h
/trunk/src/x87/assembler-x87.cc
/trunk/src/x87/assembler-x87.h
/trunk/src/x87/code-stubs-x87.cc
/trunk/src/x87/disasm-x87.cc
/trunk/src/x87/ic-x87.cc
/trunk/src/x87/lithium-x87.cc
/trunk/src/x87/lithium-x87.h
/trunk/src/x87/stub-cache-x87.cc
/trunk/test/cctest/cctest.gyp
/trunk/test/cctest/compiler/graph-builder-tester.h
/trunk/test/cctest/compiler/test-simplified-lowering.cc
/trunk/test/cctest/test-assembler-x87.cc
/trunk/test/cctest/test-disasm-x87.cc
=======================================
--- /dev/null
+++ /trunk/test/cctest/compiler/test-changes-lowering.cc Fri Aug 1
10:40:37 2014 UTC
@@ -0,0 +1,396 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/control-builders.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/execution.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename ReturnType>
+class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
+ public:
+ explicit ChangesLoweringTester(MachineRepresentation p0 = kMachineLast)
+ : GraphBuilderTester<ReturnType>(p0),
+ typer(this->zone()),
+ source_positions(this->graph()),
+ jsgraph(this->graph(), this->common(), &typer),
+ lowering(&jsgraph, &source_positions),
+ function(Handle<JSFunction>::null()) {}
+
+ Typer typer;
+ SourcePositionTable source_positions;
+ JSGraph jsgraph;
+ SimplifiedLowering lowering;
+ Handle<JSFunction> function;
+
+ Node* start() { return this->graph()->start(); }
+
+ template <typename T>
+ T* CallWithPotentialGC() {
+ // TODO(titzer): we need to wrap the code in a JSFunction and call it
via
+ // Execution::Call() so that the GC knows about the frame, can walk it,
+ // relocate the code object if necessary, etc.
+ // This is pretty ugly and at the least should be moved up to helpers.
+ if (function.is_null()) {
+ function =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(CompileRun(
+ "(function() { 'use strict'; return 2.7123; })")));
+ CompilationInfoWithZone info(function);
+ CHECK(Parser::Parse(&info));
+ StrictMode strict_mode = info.function()->strict_mode();
+ info.SetStrictMode(strict_mode);
+ info.SetOptimizing(BailoutId::None(),
Handle<Code>(function->code()));
+ CHECK(Rewriter::Rewrite(&info));
+ CHECK(Scope::Analyze(&info));
+ CHECK_NE(NULL, info.scope());
+ Pipeline pipeline(&info);
+ Linkage linkage(&info);
+ Handle<Code> code =
+ pipeline.GenerateCodeForMachineGraph(&linkage, this->graph());
+ CHECK(!code.is_null());
+ function->ReplaceCode(*code);
+ }
+ Handle<Object>* args = NULL;
+ MaybeHandle<Object> result =
+ Execution::Call(this->isolate(), function,
factory()->undefined_value(),
+ 0, args, false);
+ return T::cast(*result.ToHandleChecked());
+ }
+
+ void StoreFloat64(Node* node, double* ptr) {
+ Node* ptr_node = this->PointerConstant(ptr);
+ this->Store(kMachineFloat64, ptr_node, node);
+ }
+
+ Node* LoadInt32(int32_t* ptr) {
+ Node* ptr_node = this->PointerConstant(ptr);
+ return this->Load(kMachineWord32, ptr_node);
+ }
+
+ Node* LoadUint32(uint32_t* ptr) {
+ Node* ptr_node = this->PointerConstant(ptr);
+ return this->Load(kMachineWord32, ptr_node);
+ }
+
+ Node* LoadFloat64(double* ptr) {
+ Node* ptr_node = this->PointerConstant(ptr);
+ return this->Load(kMachineFloat64, ptr_node);
+ }
+
+ void CheckNumber(double expected, Object* number) {
+
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
+ }
+
+ void BuildAndLower(Operator* op) {
+ // We build a graph by hand here, because the raw machine assembler
+ // does not add the correct control and effect nodes.
+ Node* p0 = this->Parameter(0);
+ Node* change = this->graph()->NewNode(op, p0);
+ Node* ret = this->graph()->NewNode(this->common()->Return(), change,
+ this->start(), this->start());
+ Node* end = this->graph()->NewNode(this->common()->End(), ret);
+ this->graph()->SetEnd(end);
+ this->lowering.Lower(change);
+ Verifier::Run(this->graph());
+ }
+
+ void BuildStoreAndLower(Operator* op, Operator* store_op, void*
location) {
+ // We build a graph by hand here, because the raw machine assembler
+ // does not add the correct control and effect nodes.
+ Node* p0 = this->Parameter(0);
+ Node* change = this->graph()->NewNode(op, p0);
+ Node* store = this->graph()->NewNode(
+ store_op, this->PointerConstant(location), this->Int32Constant(0),
+ change, this->start(), this->start());
+ Node* ret = this->graph()->NewNode(
+ this->common()->Return(), this->Int32Constant(0), store,
this->start());
+ Node* end = this->graph()->NewNode(this->common()->End(), ret);
+ this->graph()->SetEnd(end);
+ this->lowering.Lower(change);
+ Verifier::Run(this->graph());
+ }
+
+ void BuildLoadAndLower(Operator* op, Operator* load_op, void* location) {
+ // We build a graph by hand here, because the raw machine assembler
+ // does not add the correct control and effect nodes.
+ Node* load =
+ this->graph()->NewNode(load_op, this->PointerConstant(location),
+ this->Int32Constant(0), this->start());
+ Node* change = this->graph()->NewNode(op, load);
+ Node* ret = this->graph()->NewNode(this->common()->Return(), change,
+ this->start(), this->start());
+ Node* end = this->graph()->NewNode(this->common()->End(), ret);
+ this->graph()->SetEnd(end);
+ this->lowering.Lower(change);
+ Verifier::Run(this->graph());
+ }
+
+ Factory* factory() { return this->isolate()->factory(); }
+ Heap* heap() { return this->isolate()->heap(); }
+};
+
+
+TEST(RunChangeTaggedToInt32) {
+ // Build and lower a graph by hand.
+ ChangesLoweringTester<int32_t> t(kMachineTagged);
+ t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_INT32_INPUTS(i) {
+ int32_t input = *i;
+
+ if (Smi::IsValid(input)) {
+ int32_t result = t.Call(Smi::FromInt(input));
+ CHECK_EQ(input, result);
+ }
+
+ {
+ Handle<Object> number = t.factory()->NewNumber(input);
+ int32_t result = t.Call(*number);
+ CHECK_EQ(input, result);
+ }
+
+ {
+ Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+ int32_t result = t.Call(*number);
+ CHECK_EQ(input, result);
+ }
+ }
+ }
+}
+
+
+TEST(RunChangeTaggedToUint32) {
+ // Build and lower a graph by hand.
+ ChangesLoweringTester<uint32_t> t(kMachineTagged);
+ t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_UINT32_INPUTS(i) {
+ uint32_t input = *i;
+
+ if (Smi::IsValid(input)) {
+ uint32_t result = t.Call(Smi::FromInt(input));
+ CHECK_EQ(static_cast<int32_t>(input),
static_cast<int32_t>(result));
+ }
+
+ {
+ Handle<Object> number = t.factory()->NewNumber(input);
+ uint32_t result = t.Call(*number);
+ CHECK_EQ(static_cast<int32_t>(input),
static_cast<int32_t>(result));
+ }
+
+ {
+ Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+ uint32_t result = t.Call(*number);
+ CHECK_EQ(static_cast<int32_t>(input),
static_cast<int32_t>(result));
+ }
+ }
+ }
+}
+
+
+TEST(RunChangeTaggedToFloat64) {
+ ChangesLoweringTester<int32_t> t(kMachineTagged);
+ double result;
+
+ t.BuildStoreAndLower(t.simplified()->ChangeTaggedToFloat64(),
+ t.machine()->Store(kMachineFloat64), &result);
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_INT32_INPUTS(i) {
+ int32_t input = *i;
+
+ if (Smi::IsValid(input)) {
+ t.Call(Smi::FromInt(input));
+ CHECK_EQ(input, static_cast<int32_t>(result));
+ }
+
+ {
+ Handle<Object> number = t.factory()->NewNumber(input);
+ t.Call(*number);
+ CHECK_EQ(input, static_cast<int32_t>(result));
+ }
+
+ {
+ Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+ t.Call(*number);
+ CHECK_EQ(input, static_cast<int32_t>(result));
+ }
+ }
+ }
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_FLOAT64_INPUTS(i) {
+ double input = *i;
+ {
+ Handle<Object> number = t.factory()->NewNumber(input);
+ t.Call(*number);
+ CHECK_EQ(input, result);
+ }
+
+ {
+ Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+ t.Call(*number);
+ CHECK_EQ(input, result);
+ }
+ }
+ }
+}
+
+
+TEST(RunChangeBoolToBit) {
+ ChangesLoweringTester<int32_t> t(kMachineTagged);
+ t.BuildAndLower(t.simplified()->ChangeBoolToBit());
+
+ if (Pipeline::SupportedTarget()) {
+ Object* true_obj = t.heap()->true_value();
+ int32_t result = t.Call(true_obj);
+ CHECK_EQ(1, result);
+ }
+
+ if (Pipeline::SupportedTarget()) {
+ Object* false_obj = t.heap()->false_value();
+ int32_t result = t.Call(false_obj);
+ CHECK_EQ(0, result);
+ }
+}
+
+
+TEST(RunChangeBitToBool) {
+ ChangesLoweringTester<Object*> t(kMachineWord32);
+ t.BuildAndLower(t.simplified()->ChangeBitToBool());
+
+ if (Pipeline::SupportedTarget()) {
+ Object* result = t.Call(1);
+ Object* true_obj = t.heap()->true_value();
+ CHECK_EQ(true_obj, result);
+ }
+
+ if (Pipeline::SupportedTarget()) {
+ Object* result = t.Call(0);
+ Object* false_obj = t.heap()->false_value();
+ CHECK_EQ(false_obj, result);
+ }
+}
+
+
+bool TODO_INT32_TO_TAGGED_WILL_WORK(int32_t v) {
+ // TODO(titzer): enable all UI32 -> Tagged checking when inline
allocation
+ // works.
+ return Smi::IsValid(v);
+}
+
+
+bool TODO_UINT32_TO_TAGGED_WILL_WORK(uint32_t v) {
+ // TODO(titzer): enable all UI32 -> Tagged checking when inline
allocation
+ // works.
+ return v <= static_cast<uint32_t>(Smi::kMaxValue);
+}
+
+
+TEST(RunChangeInt32ToTagged) {
+ ChangesLoweringTester<Object*> t;
+ int32_t input;
+ t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
+ t.machine()->Load(kMachineWord32), &input);
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_INT32_INPUTS(i) {
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ if (TODO_INT32_TO_TAGGED_WILL_WORK(input)) {
+ t.CheckNumber(static_cast<double>(input), result);
+ }
+ }
+ }
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_INT32_INPUTS(i) {
+ input = *i;
+ SimulateFullSpace(CcTest::heap()->new_space());
+ Object* result = t.CallWithPotentialGC<Object>();
+ if (TODO_INT32_TO_TAGGED_WILL_WORK(input)) {
+ t.CheckNumber(static_cast<double>(input), result);
+ }
+ }
+ }
+}
+
+
+TEST(RunChangeUint32ToTagged) {
+ ChangesLoweringTester<Object*> t;
+ uint32_t input;
+ t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
+ t.machine()->Load(kMachineWord32), &input);
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_UINT32_INPUTS(i) {
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ double expected = static_cast<double>(input);
+ if (TODO_UINT32_TO_TAGGED_WILL_WORK(input)) {
+ t.CheckNumber(expected, result);
+ }
+ }
+ }
+
+ if (Pipeline::SupportedTarget()) {
+ FOR_UINT32_INPUTS(i) {
+ input = *i;
+ SimulateFullSpace(CcTest::heap()->new_space());
+ Object* result = t.CallWithPotentialGC<Object>();
+ double expected = static_cast<double>(static_cast<uint32_t>(input));
+ if (TODO_UINT32_TO_TAGGED_WILL_WORK(input)) {
+ t.CheckNumber(expected, result);
+ }
+ }
+ }
+}
+
+
+// TODO(titzer): lowering of Float64->Tagged needs inline allocation.
+#define TODO_FLOAT64_TO_TAGGED false
+
+TEST(RunChangeFloat64ToTagged) {
+ ChangesLoweringTester<Object*> t;
+ double input;
+ t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
+ t.machine()->Load(kMachineFloat64), &input);
+
+ // TODO(titzer): need inline allocation to change float to tagged.
+ if (TODO_FLOAT64_TO_TAGGED && Pipeline::SupportedTarget()) {
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ t.CheckNumber(input, result);
+ }
+ }
+
+ if (TODO_FLOAT64_TO_TAGGED && Pipeline::SupportedTarget()) {
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ SimulateFullSpace(CcTest::heap()->new_space());
+ Object* result = t.CallWithPotentialGC<Object>();
+ t.CheckNumber(input, result);
+ }
+ }
+}
=======================================
--- /trunk/ChangeLog Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/ChangeLog Fri Aug 1 10:40:37 2014 UTC
@@ -1,3 +1,8 @@
+2014-08-01: Version 3.28.53
+
+ Performance and stability improvements on all platforms.
+
+
2014-07-31: Version 3.28.52
Performance and stability improvements on all platforms.
=======================================
--- /trunk/build/toolchain.gypi Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/build/toolchain.gypi Fri Aug 1 10:40:37 2014 UTC
@@ -460,6 +460,8 @@
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
+ }],
+ ['OS=="win" and v8_target_arch=="ia32"', {
'msvs_settings': {
'VCCLCompilerTool': {
# Ensure no surprising artifacts from 80bit double math with
x86.
=======================================
--- /trunk/src/compiler/simplified-lowering.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.cc Fri Aug 1 10:40:37 2014 UTC
@@ -5,59 +5,190 @@
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/graph-inl.h"
+#include "src/compiler/node-properties-inl.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
namespace compiler {
-Node* SimplifiedLowering::DoChangeTaggedToInt32(Node* node, Node* effect,
- Node* control) {
- return node;
+Node* SimplifiedLowering::IsTagged(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
+ return graph()->NewNode(machine()->WordAnd(), node,
+ jsgraph()->Int32Constant(kSmiTagMask));
}
-Node* SimplifiedLowering::DoChangeTaggedToUint32(Node* node, Node* effect,
- Node* control) {
- return node;
+Node* SimplifiedLowering::Untag(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize +
kSmiShiftSize);
+ return graph()->NewNode(machine()->WordSar(), node, shift_amount);
}
-Node* SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
- return node;
+Node* SimplifiedLowering::SmiTag(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize +
kSmiShiftSize);
+ return graph()->NewNode(machine()->WordShl(), node, shift_amount);
}
-Node* SimplifiedLowering::DoChangeInt32ToTagged(Node* node, Node* effect,
- Node* control) {
- return node;
+Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
+ return jsgraph()->Int32Constant(offset - kHeapObjectTag);
}
-Node* SimplifiedLowering::DoChangeUint32ToTagged(Node* node, Node* effect,
+static void UpdateControlSuccessors(Node* before, Node* node) {
+ ASSERT(IrOpcode::IsControlOpcode(before->opcode()));
+ UseIter iter = before->uses().begin();
+ while (iter != before->uses().end()) {
+ if (IrOpcode::IsControlOpcode((*iter)->opcode()) &&
+ NodeProperties::IsControlEdge(iter.edge())) {
+ iter = iter.UpdateToAndIncrement(node);
+ continue;
+ }
+ ++iter;
+ }
+}
+
+
+void SimplifiedLowering::DoChangeTaggedToUI32(Node* node, Node* effect,
+ Node* control, bool
is_signed) {
+ // if (IsTagged(val))
+ // ConvertFloat64To(Int32|Uint32)(Load[kMachineFloat64](input,
#value_offset))
+ // else Untag(val)
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val),
control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* loaded = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
+ Operator* op = is_signed ? machine()->ConvertFloat64ToInt32()
+ : machine()->ConvertFloat64ToUint32();
+ Node* converted = graph()->NewNode(op, loaded);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* untagged = Untag(val);
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), converted, untagged,
merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
+
+
+void SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect,
Node* control) {
- return node;
+ // if (IsTagged(input)) Load[kMachineFloat64](input, #value_offset)
+ // else ConvertFloat64(Untag(input))
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val),
control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* loaded = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* untagged = Untag(val);
+ Node* converted =
+ graph()->NewNode(machine()->ConvertInt32ToFloat64(), untagged);
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), loaded, converted, merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
}
-Node* SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control) {
- return node;
+void SimplifiedLowering::DoChangeUI32ToTagged(Node* node, Node* effect,
+ Node* control, bool
is_signed) {
+ Node* val = node->InputAt(0);
+ Node* is_smi = NULL;
+ if (is_signed) {
+ if (SmiValuesAre32Bits()) {
+ // All int32s fit in this case.
+ ASSERT(kPointerSize == 8);
+ return node->ReplaceUses(SmiTag(val));
+ } else {
+ // TODO(turbofan): use an Int32AddWithOverflow to tag and check here.
+ Node* lt = graph()->NewNode(machine()->Int32LessThanOrEqual(), val,
+
jsgraph()->Int32Constant(Smi::kMaxValue));
+ Node* gt =
+ graph()->NewNode(machine()->Int32LessThanOrEqual(),
+ jsgraph()->Int32Constant(Smi::kMinValue), val);
+ is_smi = graph()->NewNode(machine()->Word32And(), lt, gt);
+ }
+ } else {
+ // Check if Uint32 value is in the smi range.
+ is_smi = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
+ jsgraph()->Int32Constant(Smi::kMaxValue));
+ }
+
+ // TODO(turbofan): fold smi test branch eagerly.
+ // if (IsSmi(input)) SmiTag(input);
+ // else InlineAllocAndInitHeapNumber(ConvertToFloat64(input)))
+ Node* branch = graph()->NewNode(common()->Branch(), is_smi, control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* smi_tagged = SmiTag(val);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* heap_num = jsgraph()->Constant(0.0); // TODO(titzer): alloc and
init
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), smi_tagged, heap_num,
merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
+
+
+void SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control) {
+ return; // TODO(titzer): need to call runtime to allocate in one branch
}
-Node* SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
- Node* control) {
+void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
+ Node* control) {
Node* val = node->InputAt(0);
- Operator* op = machine()->WordEqual();
- return graph()->NewNode(op, val, jsgraph()->TrueConstant());
+ Operator* op =
+ kPointerSize == 8 ? machine()->Word64Equal() :
machine()->Word32Equal();
+ Node* cmp = graph()->NewNode(op, val, jsgraph()->TrueConstant());
+ node->ReplaceUses(cmp);
}
+
+void SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect,
+ Node* control) {
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), val, control);
-Node* SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect,
- Node* control) {
- return node;
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
}
@@ -71,18 +202,16 @@
}
-Node* SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node*
control) {
+void SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node*
control) {
const FieldAccess& access = FieldAccessOf(node->op());
node->set_op(machine_.Load(access.representation));
Node* offset =
graph()->NewNode(common()->Int32Constant(access.offset -
kHeapObjectTag));
node->InsertInput(zone(), 1, offset);
- return node;
}
-Node* SimplifiedLowering::DoStoreField(Node* node, Node* effect,
- Node* control) {
+void SimplifiedLowering::DoStoreField(Node* node, Node* effect, Node*
control) {
const FieldAccess& access = FieldAccessOf(node->op());
WriteBarrierKind kind =
ComputeWriteBarrierKind(access.representation, access.type);
@@ -90,7 +219,6 @@
Node* offset =
graph()->NewNode(common()->Int32Constant(access.offset -
kHeapObjectTag));
node->InsertInput(zone(), 1, offset);
- return node;
}
@@ -131,23 +259,21 @@
}
-Node* SimplifiedLowering::DoLoadElement(Node* node, Node* effect,
- Node* control) {
+void SimplifiedLowering::DoLoadElement(Node* node, Node* effect,
+ Node* control) {
const ElementAccess& access = ElementAccessOf(node->op());
node->set_op(machine_.Load(access.representation));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- return node;
}
-Node* SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
- Node* control) {
+void SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
+ Node* control) {
const ElementAccess& access = ElementAccessOf(node->op());
WriteBarrierKind kind =
ComputeWriteBarrierKind(access.representation, access.type);
node->set_op(machine_.Store(access.representation, kind));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- return node;
}
@@ -172,25 +298,25 @@
case IrOpcode::kStringAdd:
break;
case IrOpcode::kChangeTaggedToInt32:
- DoChangeTaggedToInt32(node, start, start);
+ DoChangeTaggedToUI32(node, start, start, true);
break;
case IrOpcode::kChangeTaggedToUint32:
- DoChangeTaggedToUint32(node, start, start);
+ DoChangeTaggedToUI32(node, start, start, false);
break;
case IrOpcode::kChangeTaggedToFloat64:
DoChangeTaggedToFloat64(node, start, start);
break;
case IrOpcode::kChangeInt32ToTagged:
- DoChangeInt32ToTagged(node, start, start);
+ DoChangeUI32ToTagged(node, start, start, true);
break;
case IrOpcode::kChangeUint32ToTagged:
- DoChangeUint32ToTagged(node, start, start);
+ DoChangeUI32ToTagged(node, start, start, false);
break;
case IrOpcode::kChangeFloat64ToTagged:
DoChangeFloat64ToTagged(node, start, start);
break;
case IrOpcode::kChangeBoolToBit:
- node->ReplaceUses(DoChangeBoolToBit(node, start, start));
+ DoChangeBoolToBit(node, start, start);
break;
case IrOpcode::kChangeBitToBool:
DoChangeBitToBool(node, start, start);
=======================================
--- /trunk/src/compiler/simplified-lowering.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.h Fri Aug 1 10:40:37 2014 UTC
@@ -27,22 +27,28 @@
virtual void Lower(Node* node);
+ // TODO(titzer): These are exposed for direct testing. Use a friend
class.
+ void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control,
+ bool is_signed);
+ void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control,
+ bool is_signed);
+ void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
+ void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
+ void DoChangeBoolToBit(Node* node, Node* effect, Node* control);
+ void DoChangeBitToBool(Node* node, Node* effect, Node* control);
+ void DoLoadField(Node* node, Node* effect, Node* control);
+ void DoStoreField(Node* node, Node* effect, Node* control);
+ void DoLoadElement(Node* node, Node* effect, Node* control);
+ void DoStoreElement(Node* node, Node* effect, Node* control);
+
private:
JSGraph* jsgraph_;
MachineOperatorBuilder machine_;
- Node* DoChangeTaggedToInt32(Node* node, Node* effect, Node* control);
- Node* DoChangeTaggedToUint32(Node* node, Node* effect, Node* control);
- Node* DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
- Node* DoChangeInt32ToTagged(Node* node, Node* effect, Node* control);
- Node* DoChangeUint32ToTagged(Node* node, Node* effect, Node* control);
- Node* DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
- Node* DoChangeBoolToBit(Node* node, Node* effect, Node* control);
- Node* DoChangeBitToBool(Node* node, Node* effect, Node* control);
- Node* DoLoadField(Node* node, Node* effect, Node* control);
- Node* DoStoreField(Node* node, Node* effect, Node* control);
- Node* DoLoadElement(Node* node, Node* effect, Node* control);
- Node* DoStoreElement(Node* node, Node* effect, Node* control);
+ Node* SmiTag(Node* node);
+ Node* IsTagged(Node* node);
+ Node* Untag(Node* node);
+ Node* OffsetMinusTagConstant(int32_t offset);
Node* ComputeIndex(const ElementAccess& access, Node* index);
=======================================
--- /trunk/src/gc-tracer.cc Tue Jul 29 08:45:47 2014 UTC
+++ /trunk/src/gc-tracer.cc Fri Aug 1 10:40:37 2014 UTC
@@ -75,7 +75,9 @@
cumulative_incremental_marking_steps_(0),
cumulative_incremental_marking_bytes_(0),
cumulative_incremental_marking_duration_(0.0),
- longest_incremental_marking_step_(0.0) {
+ longest_incremental_marking_step_(0.0),
+ cumulative_marking_duration_(0.0),
+ cumulative_sweeping_duration_(0.0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
previous_ = previous_mark_compactor_event_ = current_;
@@ -174,6 +176,7 @@
cumulative_incremental_marking_duration_ += duration;
longest_incremental_marking_step_ =
Max(longest_incremental_marking_step_, duration);
+ cumulative_marking_duration_ += duration;
}
@@ -280,8 +283,8 @@
PrintF("steps_count=%d ", current_.incremental_marking_steps);
PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
PrintF("longest_step=%.1f ",
current_.longest_incremental_marking_step);
- PrintF("marking_throughput=%" V8_PTR_PREFIX "d ",
- MarkingSpeedInBytesPerMillisecond());
+ PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
+ IncrementalMarkingSpeedInBytesPerMillisecond());
}
PrintF("\n");
@@ -355,7 +358,7 @@
}
-intptr_t GCTracer::MarkingSpeedInBytesPerMillisecond() const {
+intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
if (cumulative_incremental_marking_duration_ == 0.0) return 0;
// We haven't completed an entire round of incremental marking, yet.
=======================================
--- /trunk/src/gc-tracer.h Tue Jul 29 08:45:47 2014 UTC
+++ /trunk/src/gc-tracer.h Fri Aug 1 10:40:37 2014 UTC
@@ -219,6 +219,26 @@
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+
+ // Log time spent in marking.
+ void AddMarkingTime(double duration) {
+ cumulative_marking_duration_ += duration;
+ }
+
+ // Time spent in marking.
+ double cumulative_marking_duration() const {
+ return cumulative_marking_duration_;
+ }
+
+ // Log time spent in sweeping on main thread.
+ void AddSweepingTime(double duration) {
+ cumulative_sweeping_duration_ += duration;
+ }
+
+ // Time spent in sweeping on main thread.
+ double cumulative_sweeping_duration() const {
+ return cumulative_sweeping_duration_;
+ }
// Compute the mean duration of the last scavenger events. Returns 0 if
no
// events have been recorded.
@@ -252,7 +272,7 @@
// Compute the average incremental marking speed in bytes/second.
Returns 0 if
// no events have been recorded.
- intptr_t MarkingSpeedInBytesPerMillisecond() const;
+ intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
private:
// Print one detailed trace line in name=value format.
@@ -301,6 +321,19 @@
// Longest incremental marking step since start of marking.
double longest_incremental_marking_step_;
+ // Total marking time.
+ // This timer is precise when run with --print-cumulative-gc-stat
+ double cumulative_marking_duration_;
+
+ // Total sweeping time on the main thread.
+ // This timer is precise when run with --print-cumulative-gc-stat
+ // TODO(hpayer): Account for sweeping time on sweeper threads. Add a
+ // different field for that.
+ // TODO(hpayer): This timer right now just holds the sweeping time
+ // of the initial atomic sweeping pause. Make sure that it accumulates
+ // all sweeping operations performed on the main thread.
+ double cumulative_sweeping_duration_;
+
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
}
=======================================
--- /trunk/src/heap.cc Wed Jul 30 00:05:07 2014 UTC
+++ /trunk/src/heap.cc Fri Aug 1 10:40:37 2014 UTC
@@ -5206,8 +5206,8 @@
PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
get_max_alive_after_gc());
- PrintF("total_marking_time=%.1f ", marking_time());
- PrintF("total_sweeping_time=%.1f ", sweeping_time());
+ PrintF("total_marking_time=%.1f ",
tracer_.cumulative_marking_duration());
+ PrintF("total_sweeping_time=%.1f ",
tracer_.cumulative_sweeping_duration());
PrintF("\n\n");
}
=======================================
--- /trunk/src/heap.h Wed Jul 30 00:05:07 2014 UTC
+++ /trunk/src/heap.h Fri Aug 1 10:40:37 2014 UTC
@@ -1245,24 +1245,6 @@
// Returns minimal interval between two subsequent collections.
double get_min_in_mutator() { return min_in_mutator_; }
-
- // TODO(hpayer): remove, should be handled by GCTracer
- void AddMarkingTime(double marking_time) {
- marking_time_ += marking_time;
- }
-
- double marking_time() const {
- return marking_time_;
- }
-
- // TODO(hpayer): remove, should be handled by GCTracer
- void AddSweepingTime(double sweeping_time) {
- sweeping_time_ += sweeping_time;
- }
-
- double sweeping_time() const {
- return sweeping_time_;
- }
MarkCompactCollector* mark_compact_collector() {
return &mark_compact_collector_;
=======================================
--- /trunk/src/incremental-marking.cc Tue Jul 29 08:45:47 2014 UTC
+++ /trunk/src/incremental-marking.cc Fri Aug 1 10:40:37 2014 UTC
@@ -677,9 +677,10 @@
}
-void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t
bytes_to_process) {
+ intptr_t bytes_processed = 0;
Map* filler_map = heap_->one_pointer_filler_map();
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+ while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
HeapObject* obj = marking_deque_.Pop();
// Explicitly skip one word fillers. Incremental markbit patterns are
@@ -693,8 +694,9 @@
int delta = (size - unscanned_bytes_of_large_object_);
// TODO(jochen): remove after http://crbug.com/381820 is resolved.
CHECK_LT(0, delta);
- bytes_to_process -= delta;
+ bytes_processed += delta;
}
+ return bytes_processed;
}
@@ -729,7 +731,7 @@
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
double end = base::OS::TimeCurrentMillis();
double delta = end - start;
- heap_->AddMarkingTime(delta);
+ heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
static_cast<int>(delta));
@@ -873,6 +875,7 @@
write_barriers_invoked_since_last_step_ = 0;
bytes_scanned_ += bytes_to_process;
+ intptr_t bytes_processed = 0;
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -884,7 +887,7 @@
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
- ProcessMarkingDeque(bytes_to_process);
+ bytes_processed = ProcessMarkingDeque(bytes_to_process);
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
@@ -956,8 +959,10 @@
double end = base::OS::TimeCurrentMillis();
double duration = (end - start);
- heap_->tracer()->AddIncrementalMarkingStep(duration, allocated_bytes);
- heap_->AddMarkingTime(duration);
+ // Note that we report zero bytes here when sweeping was in progress or
+ // when we just started incremental marking. In these cases we did not
+ // process the marking deque.
+ heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
}
}
=======================================
--- /trunk/src/incremental-marking.h Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/incremental-marking.h Fri Aug 1 10:40:37 2014 UTC
@@ -202,7 +202,7 @@
INLINE(void ProcessMarkingDeque());
- INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
+ INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
=======================================
--- /trunk/src/mark-compact.cc Thu Jul 31 00:04:39 2014 UTC
+++ /trunk/src/mark-compact.cc Fri Aug 1 10:40:37 2014 UTC
@@ -2291,6 +2291,10 @@
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
+ double start_time = 0.0;
+ if (FLAG_print_cumulative_gc_stat) {
+ start_time = base::OS::TimeCurrentMillis();
+ }
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
@@ -2395,6 +2399,10 @@
ProcessEphemeralMarking(&root_visitor);
AfterMarking();
+
+ if (FLAG_print_cumulative_gc_stat) {
+ heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() -
start_time);
+ }
}
@@ -3283,11 +3291,6 @@
ASSERT((p->skip_list() == NULL) || (skip_list_mode ==
REBUILD_SKIP_LIST));
ASSERT(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
sweeping_mode == SWEEP_ONLY);
-
- double start_time = 0.0;
- if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
- }
Address free_start = p->area_start();
ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) ==
0);
@@ -3359,9 +3362,6 @@
#endif
}
p->ResetLiveBytes();
- if (FLAG_print_cumulative_gc_stat) {
- space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() -
start_time);
- }
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
@@ -4308,6 +4308,11 @@
void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+ double start_time = 0.0;
+ if (FLAG_print_cumulative_gc_stat) {
+ start_time = base::OS::TimeCurrentMillis();
+ }
+
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
@@ -4372,6 +4377,11 @@
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+
+ if (FLAG_print_cumulative_gc_stat) {
+ heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+ start_time);
+ }
}
=======================================
--- /trunk/src/version.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/version.cc Fri Aug 1 10:40:37 2014 UTC
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 28
-#define BUILD_NUMBER 52
+#define BUILD_NUMBER 53
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/src/x87/assembler-x87-inl.h Wed Jul 16 00:04:33 2014 UTC
+++ /trunk/src/x87/assembler-x87-inl.h Fri Aug 1 10:40:37 2014 UTC
@@ -555,6 +555,12 @@
set_dispr(disp, rmode);
}
+
+Operand::Operand(Immediate imm) {
+ // [disp/r]
+ set_modrm(0, ebp);
+ set_dispr(imm.x_, imm.rmode_);
+}
} } // namespace v8::internal
#endif // V8_X87_ASSEMBLER_X87_INL_H_
=======================================
--- /trunk/src/x87/assembler-x87.cc Tue Jul 1 11:58:10 2014 UTC
+++ /trunk/src/x87/assembler-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -565,6 +565,13 @@
EMIT(0xC0 | src.code() << 3 | dst.code());
}
}
+
+
+void Assembler::xchg(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x87);
+ emit_operand(dst, src);
+}
void Assembler::adc(Register dst, int32_t imm32) {
@@ -750,10 +757,17 @@
}
-void Assembler::idiv(Register src) {
+void Assembler::idiv(const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
- EMIT(0xF8 | src.code());
+ emit_operand(edi, src);
+}
+
+
+void Assembler::div(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ emit_operand(esi, src);
}
@@ -773,14 +787,19 @@
void Assembler::imul(Register dst, Register src, int32_t imm32) {
+ imul(dst, Operand(src), imm32);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
EnsureSpace ensure_space(this);
if (is_int8(imm32)) {
EMIT(0x6B);
- EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit_operand(dst, src);
EMIT(imm32);
} else {
EMIT(0x69);
- EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit_operand(dst, src);
emit(imm32);
}
}
@@ -818,6 +837,13 @@
EMIT(0xF7);
EMIT(0xD8 | dst.code());
}
+
+
+void Assembler::neg(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ emit_operand(ebx, dst);
+}
void Assembler::not_(Register dst) {
@@ -825,6 +851,13 @@
EMIT(0xF7);
EMIT(0xD0 | dst.code());
}
+
+
+void Assembler::not_(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ emit_operand(edx, dst);
+}
void Assembler::or_(Register dst, int32_t imm32) {
@@ -902,24 +935,24 @@
}
-void Assembler::sar(Register dst, uint8_t imm8) {
+void Assembler::sar(const Operand& dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
- EMIT(0xF8 | dst.code());
+ emit_operand(edi, dst);
} else {
EMIT(0xC1);
- EMIT(0xF8 | dst.code());
+ emit_operand(edi, dst);
EMIT(imm8);
}
}
-void Assembler::sar_cl(Register dst) {
+void Assembler::sar_cl(const Operand& dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
- EMIT(0xF8 | dst.code());
+ emit_operand(edi, dst);
}
@@ -938,24 +971,24 @@
}
-void Assembler::shl(Register dst, uint8_t imm8) {
+void Assembler::shl(const Operand& dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
- EMIT(0xE0 | dst.code());
+ emit_operand(esp, dst);
} else {
EMIT(0xC1);
- EMIT(0xE0 | dst.code());
+ emit_operand(esp, dst);
EMIT(imm8);
}
}
-void Assembler::shl_cl(Register dst) {
+void Assembler::shl_cl(const Operand& dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
- EMIT(0xE0 | dst.code());
+ emit_operand(esp, dst);
}
@@ -967,24 +1000,24 @@
}
-void Assembler::shr(Register dst, uint8_t imm8) {
+void Assembler::shr(const Operand& dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
- EMIT(0xE8 | dst.code());
+ emit_operand(ebp, dst);
} else {
EMIT(0xC1);
- EMIT(0xE8 | dst.code());
+ emit_operand(ebp, dst);
EMIT(imm8);
}
}
-void Assembler::shr_cl(Register dst) {
+void Assembler::shr_cl(const Operand& dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
- EMIT(0xE8 | dst.code());
+ emit_operand(ebp, dst);
}
=======================================
--- /trunk/src/x87/assembler-x87.h Fri Jun 6 00:04:56 2014 UTC
+++ /trunk/src/x87/assembler-x87.h Fri Aug 1 10:40:37 2014 UTC
@@ -292,6 +292,7 @@
int x_;
RelocInfo::Mode rmode_;
+ friend class Operand;
friend class Assembler;
friend class MacroAssembler;
};
@@ -314,9 +315,14 @@
class Operand BASE_EMBEDDED {
public:
+ // reg
+ INLINE(explicit Operand(Register reg));
+
// [disp/r]
INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
- // disp only must always be relocated
+
+ // [disp/r]
+ INLINE(explicit Operand(Immediate imm));
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
@@ -352,6 +358,10 @@
return Operand(reinterpret_cast<int32_t>(cell.location()),
RelocInfo::CELL);
}
+
+ static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
+ return Operand(base, imm.x_, imm.rmode_);
+ }
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
@@ -364,9 +374,6 @@
Register reg() const;
private:
- // reg
- INLINE(explicit Operand(Register reg));
-
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_modrm(int mod, Register rm);
@@ -383,7 +390,6 @@
friend class Assembler;
friend class MacroAssembler;
- friend class LCodeGen;
};
@@ -630,8 +636,9 @@
void rep_stos();
void stos();
- // Exchange two registers
+ // Exchange
void xchg(Register dst, Register src);
+ void xchg(Register dst, const Operand& src);
// Arithmetics
void adc(Register dst, int32_t imm32);
@@ -673,13 +680,17 @@
void cdq();
- void idiv(Register src);
+ void idiv(Register src) { idiv(Operand(src)); }
+ void idiv(const Operand& src);
+ void div(Register src) { div(Operand(src)); }
+ void div(const Operand& src);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax
* src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst *
src.
void imul(Register dst, Register src, int32_t imm32); // dst = src *
imm32.
+ void imul(Register dst, const Operand& src, int32_t imm32);
void inc(Register dst);
void inc(const Operand& dst);
@@ -690,8 +701,10 @@
void mul(Register src); // edx:eax = eax
* reg.
void neg(Register dst);
+ void neg(const Operand& dst);
void not_(Register dst);
+ void not_(const Operand& dst);
void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
@@ -705,22 +718,28 @@
void ror(Register dst, uint8_t imm8);
void ror_cl(Register dst);
- void sar(Register dst, uint8_t imm8);
- void sar_cl(Register dst);
+ void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
+ void sar(const Operand& dst, uint8_t imm8);
+ void sar_cl(Register dst) { sar_cl(Operand(dst)); }
+ void sar_cl(const Operand& dst);
void sbb(Register dst, const Operand& src);
void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
- void shl(Register dst, uint8_t imm8);
- void shl_cl(Register dst);
+ void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
+ void shl(const Operand& dst, uint8_t imm8);
+ void shl_cl(Register dst) { shl_cl(Operand(dst)); }
+ void shl_cl(const Operand& dst);
void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
- void shr(Register dst, uint8_t imm8);
- void shr_cl(Register dst);
+ void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
+ void shr(const Operand& dst, uint8_t imm8);
+ void shr_cl(Register dst) { shr_cl(Operand(dst)); }
+ void shr_cl(const Operand& dst);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
=======================================
--- /trunk/src/x87/code-stubs-x87.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/x87/code-stubs-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -82,6 +82,27 @@
Register registers[] = { esi, ebx, edx };
descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = {esi, edi};
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // eax : number of arguments
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // edi : constructor function
+ // TODO(turbofan): So far we don't gather type feedback and hence skip
the
+ // slot parameter, but ArrayConstructStub needs the vector to be
undefined.
+ Register registers[] = {esi, eax, edi, ebx};
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
void RegExpConstructResultStub::InitializeInterfaceDescriptor(
@@ -2446,7 +2467,7 @@
// If there is a call site cache don't look in the global cache, but do
the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
// Look up the function and the map in the instanceof cache.
Label miss;
__ CompareRoot(function, scratch,
Heap::kInstanceofCacheFunctionRootIndex);
@@ -2505,6 +2526,9 @@
if (!HasCallSiteInlineCheck()) {
__ mov(eax, Immediate(0));
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ mov(eax, factory->true_value());
+ }
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@@ -2525,6 +2549,9 @@
if (!HasCallSiteInlineCheck()) {
__ mov(eax, Immediate(Smi::FromInt(1)));
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ mov(eax, factory->false_value());
+ }
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@@ -2552,20 +2579,32 @@
// Null is not instance of anything.
__ cmp(object, factory->null_value());
__ j(not_equal, &object_not_null, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ mov(eax, factory->false_value());
+ } else {
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ }
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values is not instance of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ mov(eax, factory->false_value());
+ } else {
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ }
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values is not instance of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ mov(eax, factory->false_value());
+ } else {
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ }
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
=======================================
--- /trunk/src/x87/disasm-x87.cc Mon Jun 23 08:21:41 2014 UTC
+++ /trunk/src/x87/disasm-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -529,77 +529,94 @@
// Returns number of bytes used, including *data.
int DisassemblerX87::F7Instruction(byte* data) {
ASSERT_EQ(0xF7, *data);
- byte modrm = *(data+1);
+ byte modrm = *++data;
int mod, regop, rm;
get_modrm(modrm, &mod, ®op, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2: mnem = "not"; break;
- case 3: mnem = "neg"; break;
- case 4: mnem = "mul"; break;
- case 5: mnem = "imul"; break;
- case 7: mnem = "idiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
- return 2;
- } else if (mod == 3 && regop == eax) {
- int32_t imm = *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
- return 6;
- } else if (regop == eax) {
- AppendToBuffer("test ");
- int count = PrintRightOperand(data+1);
- int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
- AppendToBuffer(",0x%x", imm);
- return 1+count+4 /*int32_t*/;
- } else {
- UnimplementedInstruction();
- return 2;
+ const char* mnem = NULL;
+ switch (regop) {
+ case 0:
+ mnem = "test";
+ break;
+ case 2:
+ mnem = "not";
+ break;
+ case 3:
+ mnem = "neg";
+ break;
+ case 4:
+ mnem = "mul";
+ break;
+ case 5:
+ mnem = "imul";
+ break;
+ case 6:
+ mnem = "div";
+ break;
+ case 7:
+ mnem = "idiv";
+ break;
+ default:
+ UnimplementedInstruction();
}
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data);
+ if (regop == 0) {
+ AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+ count += 4;
+ }
+ return 1 + count;
}
int DisassemblerX87::D1D3C1Instruction(byte* data) {
byte op = *data;
ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
- byte modrm = *(data+1);
+ byte modrm = *++data;
int mod, regop, rm;
get_modrm(modrm, &mod, ®op, &rm);
int imm8 = -1;
- int num_bytes = 2;
- if (mod == 3) {
- const char* mnem = NULL;
- switch (regop) {
- case kROL: mnem = "rol"; break;
- case kROR: mnem = "ror"; break;
- case kRCL: mnem = "rcl"; break;
- case kRCR: mnem = "rcr"; break;
- case kSHL: mnem = "shl"; break;
- case KSHR: mnem = "shr"; break;
- case kSAR: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
- if (op == 0xD1) {
- imm8 = 1;
- } else if (op == 0xC1) {
- imm8 = *(data+2);
- num_bytes = 3;
- } else if (op == 0xD3) {
- // Shift/rotate by cl.
- }
- ASSERT_NE(NULL, mnem);
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 >= 0) {
- AppendToBuffer("%d", imm8);
- } else {
- AppendToBuffer("cl");
- }
+ const char* mnem = NULL;
+ switch (regop) {
+ case kROL:
+ mnem = "rol";
+ break;
+ case kROR:
+ mnem = "ror";
+ break;
+ case kRCL:
+ mnem = "rcl";
+ break;
+ case kRCR:
+ mnem = "rcr";
+ break;
+ case kSHL:
+ mnem = "shl";
+ break;
+ case KSHR:
+ mnem = "shr";
+ break;
+ case kSAR:
+ mnem = "sar";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data);
+ if (op == 0xD1) {
+ imm8 = 1;
+ } else if (op == 0xC1) {
+ imm8 = *(data + 2);
+ count++;
+ } else if (op == 0xD3) {
+ // Shift/rotate by cl.
+ }
+ if (imm8 >= 0) {
+ AppendToBuffer(",%d", imm8);
} else {
- UnimplementedInstruction();
+ AppendToBuffer(",cl");
}
- return num_bytes;
+ return 1 + count;
}
@@ -954,17 +971,18 @@
data += 3;
break;
- case 0x69: // fall through
- case 0x6B:
- { int mod, regop, rm;
- get_modrm(*(data+1), &mod, ®op, &rm);
- int32_t imm =
- *data == 0x6B ? *(data+2) :
*reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("imul %s,%s,0x%x",
- NameOfCPURegister(regop),
- NameOfCPURegister(rm),
- imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
+ case 0x6B: {
+ data++;
+ data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+ AppendToBuffer(",%d", *data);
+ data++;
+ } break;
+
+ case 0x69: {
+ data++;
+ data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+ AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+ data += 4;
}
break;
=======================================
--- /trunk/src/x87/ic-x87.cc Thu Jul 31 00:04:39 2014 UTC
+++ /trunk/src/x87/ic-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -988,21 +988,6 @@
const Register StoreIC::ReceiverRegister() { return edx; }
const Register StoreIC::NameRegister() { return ecx; }
const Register StoreIC::ValueRegister() { return eax; }
-
-
-const Register KeyedStoreIC::ReceiverRegister() {
- return StoreIC::ReceiverRegister();
-}
-
-
-const Register KeyedStoreIC::NameRegister() {
- return StoreIC::NameRegister();
-}
-
-
-const Register KeyedStoreIC::ValueRegister() {
- return StoreIC::ValueRegister();
-}
const Register KeyedStoreIC::MapRegister() {
=======================================
--- /trunk/src/x87/lithium-x87.cc Thu Jul 31 00:04:39 2014 UTC
+++ /trunk/src/x87/lithium-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -7,9 +7,8 @@
#if V8_TARGET_ARCH_X87
#include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
#include "src/x87/lithium-codegen-x87.h"
-#include "src/x87/lithium-x87.h"
namespace v8 {
namespace internal {
=======================================
--- /trunk/src/x87/lithium-x87.h Thu Jul 31 00:04:39 2014 UTC
+++ /trunk/src/x87/lithium-x87.h Fri Aug 1 10:40:37 2014 UTC
@@ -14,6 +14,10 @@
namespace v8 {
namespace internal {
+namespace compiler {
+class RCodeVisualizer;
+}
+
// Forward declarations.
class LCodeGen;
@@ -202,7 +206,7 @@
enum Opcode {
// Declare a unique enum value for each instruction.
#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
kNumberOfInstructions
#undef DECLARE_OPCODE
};
@@ -220,6 +224,9 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
+
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -263,11 +270,12 @@
void VerifyCall();
#endif
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
private:
// Iterator support.
friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
friend class TempIterator;
virtual int TempCount() = 0;
=======================================
--- /trunk/src/x87/stub-cache-x87.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/x87/stub-cache-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -1195,19 +1195,10 @@
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreIC::ReceiverRegister();
Register name = StoreIC::NameRegister();
+ ASSERT(ebx.is(KeyedStoreIC::MapRegister()));
static Register registers[] = { receiver, name, ebx, edi, no_reg };
return registers;
}
-
-
-Register* PropertyAccessCompiler::keyed_store_calling_convention() {
- // receiver, name, scratch1/map, scratch2, scratch3.
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register map = KeyedStoreIC::MapRegister();
- static Register registers[] = { receiver, name, map, edi, no_reg };
- return registers;
-}
Register NamedStoreHandlerCompiler::value() { return
StoreIC::ValueRegister(); }
=======================================
--- /trunk/test/cctest/cctest.gyp Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/cctest.gyp Fri Aug 1 10:40:37 2014 UTC
@@ -53,6 +53,7 @@
'compiler/simplified-graph-builder.cc',
'compiler/simplified-graph-builder.h',
'compiler/test-branch-combine.cc',
+ 'compiler/test-changes-lowering.cc',
'compiler/test-codegen-deopt.cc',
'compiler/test-gap-resolver.cc',
'compiler/test-graph-reducer.cc',
=======================================
--- /trunk/test/cctest/compiler/graph-builder-tester.h Thu Jul 31 18:45:14
2014 UTC
+++ /trunk/test/cctest/compiler/graph-builder-tester.h Fri Aug 1 10:40:37
2014 UTC
@@ -84,11 +84,11 @@
public SimplifiedGraphBuilder,
public CallHelper2<ReturnType, GraphBuilderTester<ReturnType> > {
public:
- explicit GraphBuilderTester(MachineRepresentation p0,
- MachineRepresentation p1,
- MachineRepresentation p2,
- MachineRepresentation p3,
- MachineRepresentation p4)
+ explicit GraphBuilderTester(MachineRepresentation p0 = kMachineLast,
+ MachineRepresentation p1 = kMachineLast,
+ MachineRepresentation p2 = kMachineLast,
+ MachineRepresentation p3 = kMachineLast,
+ MachineRepresentation p4 = kMachineLast)
: GraphAndBuilders(main_zone()),
MachineCallHelper(
main_zone(),
=======================================
--- /trunk/test/cctest/compiler/test-simplified-lowering.cc Thu Jul 31
18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/test-simplified-lowering.cc Fri Aug 1
10:40:37 2014 UTC
@@ -5,6 +5,7 @@
#include <limits>
#include "src/compiler/control-builders.h"
+#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/simplified-lowering.h"
@@ -23,6 +24,7 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
+// TODO(titzer): rename this to VMLoweringTester
template <typename ReturnType>
class SimplifiedGraphBuilderTester : public GraphBuilderTester<ReturnType>
{
public:
@@ -31,16 +33,20 @@
MachineRepresentation p2 = kMachineLast,
MachineRepresentation p3 = kMachineLast,
MachineRepresentation p4 = kMachineLast)
- : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4) {}
+ : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
+ typer(this->zone()),
+ source_positions(this->graph()),
+ jsgraph(this->graph(), this->common(), &typer),
+ lowering(&jsgraph, &source_positions) {}
+
+ Typer typer;
+ SourcePositionTable source_positions;
+ JSGraph jsgraph;
+ SimplifiedLowering lowering;
// Close graph and lower one node.
void Lower(Node* node) {
this->End();
- Typer typer(this->zone());
- CommonOperatorBuilder common(this->zone());
- SourcePositionTable source_positions(this->graph());
- JSGraph jsgraph(this->graph(), &common, &typer);
- SimplifiedLowering lowering(&jsgraph, &source_positions);
if (node == NULL) {
lowering.LowerAllNodes();
} else {
@@ -74,313 +80,6 @@
Factory* factory() { return this->isolate()->factory(); }
Heap* heap() { return this->isolate()->heap(); }
};
-
-
-class SimplifiedGraphBuilderJSTester
- : public SimplifiedGraphBuilderTester<Object*> {
- public:
- SimplifiedGraphBuilderJSTester()
- : SimplifiedGraphBuilderTester<Object*>(),
-
f_(v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(CompileRun(
- "(function() { 'use strict'; return 2.7123; })")))),
- swapped_(false) {
- set_current_context(HeapConstant(handle(f_->context())));
- }
-
- template <typename T>
- T* CallJS() {
- if (!swapped_) {
- Compile();
- }
- Handle<Object>* args = NULL;
- MaybeHandle<Object> result = Execution::Call(
- isolate(), f_, factory()->undefined_value(), 0, args, false);
- return T::cast(*result.ToHandleChecked());
- }
-
- private:
- void Compile() {
- CompilationInfoWithZone info(f_);
- CHECK(Parser::Parse(&info));
- StrictMode strict_mode = info.function()->strict_mode();
- info.SetStrictMode(strict_mode);
- info.SetOptimizing(BailoutId::None(), Handle<Code>(f_->code()));
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
- CHECK_NE(NULL, info.scope());
- Pipeline pipeline(&info);
- Linkage linkage(&info);
- Handle<Code> code = pipeline.GenerateCodeForMachineGraph(&linkage,
graph());
- CHECK(!code.is_null());
- f_->ReplaceCode(*code);
- swapped_ = true;
- }
-
- Handle<JSFunction> f_;
- bool swapped_;
-};
-
-
-TEST(RunChangeTaggedToInt32) {
- SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
- Node* x = t.ChangeTaggedToInt32(t.Parameter(0));
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- int32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(input, result);
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
- }
-}
-
-
-TEST(RunChangeTaggedToUint32) {
- SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
- Node* x = t.ChangeTaggedToUint32(t.Parameter(0));
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- FOR_UINT32_INPUTS(i) {
- uint32_t input = *i;
-
- if (Smi::IsValid(input)) {
- int32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(static_cast<int32_t>(input), result);
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), result);
- }
- }
-}
-
-
-TEST(RunChangeTaggedToFloat64) {
- SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
- double result;
- Node* x = t.ChangeTaggedToFloat64(t.Parameter(0));
- t.StoreFloat64(x, &result);
- t.Return(t.Int32Constant(0));
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- {
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- t.Call(Smi::FromInt(input));
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
- }
- }
-
- {
- FOR_FLOAT64_INPUTS(i) {
- double input = *i;
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_EQ(input, result);
- }
- }
- }
-}
-
-
-TEST(RunChangeBoolToBit) {
- SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
- Node* x = t.ChangeBoolToBit(t.Parameter(0));
- t.Return(x);
-
- t.Lower(x);
-
- if (!Pipeline::SupportedTarget()) return;
-
- {
- Object* true_obj = t.heap()->true_value();
- int32_t result = t.Call(true_obj);
- CHECK_EQ(1, result);
- }
-
- {
- Object* false_obj = t.heap()->false_value();
- int32_t result = t.Call(false_obj);
- CHECK_EQ(0, result);
- }
-}
-
-
-TEST(RunChangeBitToBool) {
- SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
- Node* x = t.ChangeBitToBool(t.Parameter(0));
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- {
- Object* result = t.Call(1);
- Object* true_obj = t.heap()->true_value();
- CHECK_EQ(true_obj, result);
- }
-
- {
- Object* result = t.Call(0);
- Object* false_obj = t.heap()->false_value();
- CHECK_EQ(false_obj, result);
- }
-}
-
-
-TEST(RunChangeInt32ToTagged) {
- SimplifiedGraphBuilderJSTester t;
- int32_t input;
- Node* load = t.LoadInt32(&input);
- Node* x = t.ChangeInt32ToTagged(load);
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
-
- {
- FOR_INT32_INPUTS(i) {
- input = *i;
- HeapNumber* result = t.CallJS<HeapNumber>();
- CHECK_EQ(static_cast<double>(input), result->value());
- }
- }
-
- {
- FOR_INT32_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- HeapNumber* result = t.CallJS<HeapNumber>();
- CHECK_EQ(static_cast<double>(input), result->value());
- }
- }
-}
-
-
-TEST(RunChangeUint32ToTagged) {
- SimplifiedGraphBuilderJSTester t;
- uint32_t input;
- Node* load = t.LoadUint32(&input);
- Node* x = t.ChangeUint32ToTagged(load);
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- {
- FOR_UINT32_INPUTS(i) {
- input = *i;
- HeapNumber* result = t.CallJS<HeapNumber>();
- double expected = static_cast<double>(input);
- CHECK_EQ(expected, result->value());
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- HeapNumber* result = t.CallJS<HeapNumber>();
- double expected = static_cast<double>(static_cast<uint32_t>(input));
- CHECK_EQ(expected, result->value());
- }
- }
-}
-
-
-TEST(RunChangeFloat64ToTagged) {
- SimplifiedGraphBuilderJSTester t;
- double input;
- Node* load = t.LoadFloat64(&input);
- Node* x = t.ChangeFloat64ToTagged(load);
- t.Return(x);
-
- t.Lower(x);
-
- // TODO(titzer): remove me.
- return;
-
- {
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- HeapNumber* result = t.CallJS<HeapNumber>();
- CHECK_EQ(input, result->value());
- }
- }
- {
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- HeapNumber* result = t.CallJS<HeapNumber>();
- CHECK_EQ(input, result->value());
- }
- }
-}
// TODO(dcarney): find a home for these functions.
=======================================
--- /trunk/test/cctest/test-assembler-x87.cc Tue Jul 8 06:57:45 2014 UTC
+++ /trunk/test/cctest/test-assembler-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -33,6 +33,7 @@
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
+#include "src/ostreams.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
=======================================
--- /trunk/test/cctest/test-disasm-x87.cc Tue Jul 8 06:57:45 2014 UTC
+++ /trunk/test/cctest/test-disasm-x87.cc Fri Aug 1 10:40:37 2014 UTC
@@ -168,6 +168,11 @@
__ nop();
__ idiv(edx);
+ __ idiv(Operand(edx, ecx, times_1, 1));
+ __ idiv(Operand(esp, 12));
+ __ div(edx);
+ __ div(Operand(edx, ecx, times_1, 1));
+ __ div(Operand(esp, 12));
__ mul(edx);
__ neg(edx);
__ not_(edx);
@@ -175,7 +180,9 @@
__ imul(edx, Operand(ebx, ecx, times_4, 10000));
__ imul(edx, ecx, 12);
+ __ imul(edx, Operand(edx, eax, times_2, 42), 8);
__ imul(edx, ecx, 1000);
+ __ imul(edx, Operand(ebx, ecx, times_4, 1), 9000);
__ inc(edx);
__ inc(Operand(ebx, ecx, times_4, 10000));
@@ -197,15 +204,24 @@
__ sar(edx, 1);
__ sar(edx, 6);
__ sar_cl(edx);
+ __ sar(Operand(ebx, ecx, times_4, 10000), 1);
+ __ sar(Operand(ebx, ecx, times_4, 10000), 6);
+ __ sar_cl(Operand(ebx, ecx, times_4, 10000));
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
__ shld(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
__ shl_cl(edx);
+ __ shl(Operand(ebx, ecx, times_4, 10000), 1);
+ __ shl(Operand(ebx, ecx, times_4, 10000), 6);
+ __ shl_cl(Operand(ebx, ecx, times_4, 10000));
__ shrd(edx, Operand(ebx, ecx, times_4, 10000));
__ shr(edx, 1);
__ shr(edx, 7);
__ shr_cl(edx);
+ __ shr(Operand(ebx, ecx, times_4, 10000), 1);
+ __ shr(Operand(ebx, ecx, times_4, 10000), 6);
+ __ shr_cl(Operand(ebx, ecx, times_4, 10000));
// Immediates
@@ -361,6 +377,14 @@
__ frndint();
__ fninit();
__ nop();
+
+ // xchg.
+ {
+ __ xchg(eax, eax);
+ __ xchg(eax, ebx);
+ __ xchg(ebx, ebx);
+ __ xchg(ebx, Operand(esp, 12));
+ }
// Nop instructions
for (int i = 0; i < 16; i++) {
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.