Revision: 15848
Author: [email protected]
Date: Wed Jul 24 01:50:03 2013
Log: Objects can no longer be migrated or evacuated into large object
space.
BUG=
[email protected]
Review URL: https://codereview.chromium.org/19959007
http://code.google.com/p/v8/source/detail?r=15848
Modified:
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/spaces.h
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Tue Jul 23 00:41:46 2013
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Jul 24 01:50:03 2013
@@ -2722,7 +2722,9 @@
int size,
AllocationSpace dest) {
HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+ // TODO(hpayer): Replace that check with an assert.
+ CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2894,37 +2896,24 @@
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
- Object* result;
+ // TODO(hpayer): Replace that check with an assert.
+ CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) {
- MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- LO_SPACE);
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- } else {
- OldSpace* target_space = heap()->TargetSpace(object);
+ OldSpace* target_space = heap()->TargetSpace(object);
- ASSERT(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- target_space->identity());
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
+ ASSERT(target_space == heap()->old_pointer_space() ||
+ target_space == heap()->old_data_space());
+ Object* result;
+ MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+ if (maybe_result->ToObject(&result)) {
+ HeapObject* target = HeapObject::cast(result);
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space->identity());
+ heap()->mark_compact_collector()->tracer()->
+ increment_promoted_objects_size(object_size);
+ return true;
}
return false;
=======================================
--- /branches/bleeding_edge/src/spaces.h Wed Jul 3 08:39:18 2013
+++ /branches/bleeding_edge/src/spaces.h Wed Jul 24 01:50:03 2013
@@ -781,7 +781,10 @@
// Object area size in bytes.
static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
- // Maximum object size that fits in a page.
+ // Maximum object size that fits in a page. Objects larger than that size
+ // are allocated in large object space and are never moved in memory. This
+ // also applies to new space allocation, since objects are never migrated
+ // from new space to large object space.
static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
// Page size mask.
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.