Reviewers: Jakob,
Description:
Merged r16224 into 3.20 branch.
Fix invalid out-of-bounds store in MacroAssembler::Allocate.
[email protected]
BUG=chromium:263515
Please review this at https://codereview.chromium.org/22980024/
SVN Base: https://v8.googlecode.com/svn/branches/3.20
Affected files:
M src/arm/macro-assembler-arm.cc
M src/ia32/macro-assembler-ia32.cc
M src/mips/macro-assembler-mips.cc
M src/version.cc
M src/x64/macro-assembler-x64.cc
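For context, the bug works roughly as sketched below (hypothetical C++-style
pseudocode with made-up helpers Store and Bail, not the actual V8 source).
When DOUBLE_ALIGNMENT is requested and the current allocation top is only
pointer-aligned, Allocate stores a one-pointer filler to pad up to the next
double-aligned address. Storing before checking the limit is fine in
new-space, whose limit is itself double-aligned, but in pretenured old data
space the limit need not be aligned, so the filler store could land one word
past the limit:

  // Sketch of the double-alignment step in Allocate (assumed helpers):
  if ((top & kDoubleAlignmentMask) != 0) {         // top not double-aligned
    if (pretenure_old_data_space && top >= limit) {
      Bail(gc_required);                           // new guard: the filler
    }                                              //  slot may lie past limit
    Store(top, one_pointer_filler_map);            // pad with a filler object
    top += kPointerSize;                           // top now double-aligned
  }

The hunks below add this guard on ARM, ia32, and MIPS; on ia32 the
allocation_limit load is also hoisted above the alignment code so the new
compare can use it.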
Index: src/arm/macro-assembler-arm.cc
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index a56744bf597a245a0afed0b4b1a64b38d1a4cd0c..974b56959fd5237621f1090158c228e11a681e28 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1732,12 +1732,16 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
@@ -1830,12 +1834,16 @@ void MacroAssembler::Allocate(Register object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
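A note on the bare cmp against ip in the ARM hunks above: earlier code in
Allocate, outside these hunks, loads the allocation limit into ip, so the
added guard reads, hedged in pseudo-register terms:

  // Assumed register state at this point (set up outside the hunk):
  //   result -- candidate allocation top,  ip -- allocation limit
  cmp(result, Operand(ip));  // unsigned compare: top vs. limit
  b(hs, gc_required);        // top >= limit: filler slot is out of bounds

The MIPS hunks further down rely on t9 holding the limit in the same way.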
Index: src/ia32/macro-assembler-ia32.cc
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 67a7c0d2b49010552c1afb5d0c1ccf1dbe84e92b..f86820cba6469876d73c63c397cb72c1ea0ed448 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1307,26 +1307,29 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
- Register top_reg = result_end.is_valid() ? result_end : result;
-
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
+ Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
mov(top_reg, result);
}
@@ -1381,14 +1384,21 @@ void MacroAssembler::Allocate(int header_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
@@ -1396,9 +1406,6 @@ void MacroAssembler::Allocate(int header_size,
}
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
// We assume that element_count*element_size + header_size does not
// overflow.
if (element_count_type == REGISTER_VALUE_IS_SMI) {
@@ -1452,14 +1459,21 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
@@ -1467,9 +1481,6 @@ void MacroAssembler::Allocate(Register object_size,
}
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
Index: src/mips/macro-assembler-mips.cc
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e53f10afaca7681805d23683151f29a43f255167..5becf7c370270f03bc00de8bb71473a8be809916 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -2944,12 +2944,15 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
And(scratch2, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(scratch2, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
@@ -3028,12 +3031,15 @@ void MacroAssembler::Allocate(Register object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
And(scratch2, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(scratch2, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
Index: src/version.cc
diff --git a/src/version.cc b/src/version.cc
index de0265e7d036ca334866ae9c5cc5878906a6db8b..85c6b7c02f7b0e08b20b3a8f775a796a877369dd 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 20
#define BUILD_NUMBER 17
-#define PATCH_LEVEL 1
+#define PATCH_LEVEL 2
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9c9b1620e586ce5872bbc46de2eaf515266716a0..74e3fcc33a1701d4a1836090188de51776bc22f5 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3859,7 +3859,7 @@ void MacroAssembler::Allocate(int object_size,
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
testq(result, Immediate(kDoubleAlignmentMask));
Check(zero, kAllocationIsNotDoubleAligned);
@@ -3938,7 +3938,7 @@ void MacroAssembler::Allocate(Register object_size,
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
testq(result, Immediate(kDoubleAlignmentMask));
Check(zero, kAllocationIsNotDoubleAligned);
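The x64 hunks only touch the comment: assuming the usual x64 constants
(kPointerSize == kDoubleSize == 8), every pointer-aligned allocation is
already double-aligned, so the DOUBLE_ALIGNMENT path never emits a filler
store there and no limit check is needed; the flag only drives the debug-mode
alignment check visible in the context lines.

  // Illustrative only (assumed x64 constants, not part of the patch):
  STATIC_ASSERT(kPointerSize == kDoubleSize);  // result stays double-aligned,
                                               // so no filler, no guard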