Reviewers: Toon Verwaest,
Description:
Merged r15808, r15811 into trunk branch.
ARM: Ensure space for lazy deoptimization before calling IC.
MIPS: Ensure space for lazy deoptimization before calling IC.
BUG=247688
[email protected]
Please review this at https://chromiumcodereview.appspot.com/19670016/
SVN Base: https://v8.googlecode.com/svn/trunk
Affected files:
M src/arm/lithium-codegen-arm.cc
M src/mips/lithium-codegen-mips.cc
M src/version.cc
A + test/mjsunit/regress/regress-247688.js
Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 9e0d59f8ec910e4d7d3ef60b44d7063ba8f75009..89eb8c858eac7f848ca72552595306944ea02769 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -277,6 +277,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
return !is_aborted();
}
@@ -676,6 +677,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
+ EnsureSpaceForLazyDeopt();
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
@@ -5610,12 +5612,12 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5673,6 +5675,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5685,6 +5688,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
Index: src/mips/lithium-codegen-mips.cc
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 65b4a575f7703d1d8394808141c0b2708b91e71f..29633dd56ba7abc428a714455d8a4ec55b145eb7 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -271,6 +271,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
return !is_aborted();
}
@@ -5615,12 +5616,12 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5676,6 +5677,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
StackCheckStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5687,6 +5689,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
Index: src/version.cc
diff --git a/src/version.cc b/src/version.cc
index 1f1ff7add70775b19ecdf1dac8645a163db83fd0..41ac37fcb38f4f9505fc9df0002c2d0b080a0208 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 20
#define BUILD_NUMBER 7
-#define PATCH_LEVEL 0
+#define PATCH_LEVEL 1
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
Index: test/mjsunit/regress/regress-247688.js
diff --git a/test/mjsunit/regress/regress-234101.js b/test/mjsunit/regress/regress-247688.js
similarity index 73%
copy from test/mjsunit/regress/regress-234101.js
copy to test/mjsunit/regress/regress-247688.js
index 74228dfabe532efdce2d2cd31736959eaff4349b..80e2884c705ef230e96a93ede8e5815175b01363 100644
--- a/test/mjsunit/regress/regress-234101.js
+++ b/test/mjsunit/regress/regress-247688.js
@@ -27,16 +27,54 @@
// Flags: --allow-natives-syntax
-// Currently, the gap resolver doesn't handle moves from a ConstantOperand to a
-// DoubleRegister, but these kind of moves appeared when HConstant::EmitAtUses
-// was changed to allow special double values (-0, NaN, hole). So we should
-// either enhance the gap resolver or make sure that such moves don't happen.
+var a = {};
+a.x = 1
+a.y = 1.5
-function foo(x) {
- return (x ? NaN : 0.2) + 0.1;
+var b = {}
+b.x = 1.5;
+b.y = 1;
+
+var c = {}
+c.x = 1.5;
+
+var d = {}
+d.x = 1.5;
+
+var e = {}
+e.x = 1.5;
+
+var f = {}
+f.x = 1.5;
+
+var g = {}
+g.x = 1.5;
+
+var h = {}
+h.x = 1.5;
+
+var i = {}
+i.x = 1.5;
+
+var o = {}
+var p = {y : 10, z : 1}
+o.__proto__ = p;
+delete p.z
+
+function foo(v, w) {
+ // Make load via IC in optimized code. Its target will get overwritten by
+ // lazy deopt patch for the stack check.
+ v.y;
+ // Make store with transition to make this code dependent on the map.
+ w.y = 1;
+ return b.y;
}
-foo(false);
-foo(false);
+foo(o, c);
+foo(o, d);
+foo(o, e);
%OptimizeFunctionOnNextCall(foo);
-foo(false);
+foo(b, f);
+foo(b, g);
+foo(b, h);
+foo(a, i);
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.