Revision: 22661
Author: [email protected]
Date: Tue Jul 29 08:45:47 2014 UTC
Log: Version 3.28.45 (based on bleeding_edge revision r22658)
Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=22661
Modified:
/trunk/ChangeLog
/trunk/src/array.js
/trunk/src/bootstrapper.cc
/trunk/src/counters.cc
/trunk/src/counters.h
/trunk/src/elements.cc
/trunk/src/factory.cc
/trunk/src/gc-tracer.cc
/trunk/src/gc-tracer.h
/trunk/src/globals.h
/trunk/src/heap.cc
/trunk/src/heap.h
/trunk/src/ic.cc
/trunk/src/incremental-marking.cc
/trunk/src/list-inl.h
/trunk/src/lithium.cc
/trunk/src/objects-inl.h
/trunk/src/objects.cc
/trunk/src/runtime.cc
/trunk/src/runtime.h
/trunk/src/spaces.cc
/trunk/src/spaces.h
/trunk/src/store-buffer.cc
/trunk/src/stub-cache.cc
/trunk/src/stub-cache.h
/trunk/src/version.cc
/trunk/src/x87/code-stubs-x87.cc
/trunk/src/x87/stub-cache-x87.cc
/trunk/test/cctest/test-constantpool.cc
/trunk/test/cctest/test-heap.cc
/trunk/test/fuzz-natives/base.js
/trunk/test/mjsunit/allocation-site-info.js
/trunk/test/mjsunit/apply.js
/trunk/test/mjsunit/array-constructor-feedback.js
/trunk/test/mjsunit/array-feedback.js
/trunk/test/mjsunit/elements-kind.js
/trunk/test/mjsunit/es7/object-observe.js
/trunk/test/mjsunit/mjsunit.status
/trunk/test/mjsunit/polymorph-arrays.js
/trunk/test/mjsunit/regress/regress-2790.js
/trunk/tools/generate-runtime-tests.py
/trunk/tools/push-to-trunk/releases.py
/trunk/tools/push-to-trunk/test_scripts.py
/trunk/tools/whitespace.txt
=======================================
--- /trunk/ChangeLog Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/ChangeLog Tue Jul 29 08:45:47 2014 UTC
@@ -1,3 +1,8 @@
+2014-07-29: Version 3.28.45
+
+ Performance and stability improvements on all platforms.
+
+
2014-07-28: Version 3.28.43
Performance and stability improvements on all platforms.
=======================================
--- /trunk/src/array.js Tue Jul 15 00:04:47 2014 UTC
+++ /trunk/src/array.js Tue Jul 29 08:45:47 2014 UTC
@@ -86,11 +86,20 @@
}
-function UseSparseVariant(object, length, is_array) {
- return is_array &&
- length > 1000 &&
- (!%_IsSmi(length) ||
- %EstimateNumberOfElements(object) < (length >> 2));
+function UseSparseVariant(array, length, is_array, touched) {
+  // Only use the sparse variant on arrays that are likely to be sparse and the
+  // number of elements touched in the operation is relatively small compared to
+  // the overall size of the array.
+ if (!is_array || length < 1000 || %IsObserved(array)) {
+ return false;
+ }
+ if (!%_IsSmi(length)) {
+ return true;
+ }
+ var elements_threshold = length >> 2; // No more than 75% holes
+ var estimated_elements = %EstimateNumberOfElements(array);
+ return (estimated_elements < elements_threshold) &&
+ (touched > estimated_elements * 4);
}
@@ -107,7 +116,8 @@
// Attempt to convert the elements.
try {
- if (UseSparseVariant(array, length, is_array)) {
+ if (UseSparseVariant(array, length, is_array, length)) {
+ %NormalizeElements(array);
if (separator.length == 0) {
return SparseJoin(array, length, convert);
} else {
@@ -518,13 +528,15 @@
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT_INLINE(this);
- var j = TO_UINT32(array.length) - 1;
+ var len = TO_UINT32(array.length);
- if (UseSparseVariant(array, j, IS_ARRAY(array))) {
- SparseReverse(array, j+1);
+ if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
+ %NormalizeElements(array);
+ SparseReverse(array, len);
return array;
}
+ var j = len - 1;
for (var i = 0; i < j; i++, j--) {
var current_i = array[i];
if (!IS_UNDEFINED(current_i) || i in array) {
@@ -670,10 +682,9 @@
if (end_i < start_i) return result;
- if (IS_ARRAY(array) &&
- !%IsObserved(array) &&
- (end_i > 1000) &&
- (%EstimateNumberOfElements(array) < end_i)) {
+ if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
+ %NormalizeElements(array);
+ %NormalizeElements(result);
SmartSlice(array, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(array, start_i, end_i - start_i, len, result);
@@ -781,24 +792,20 @@
["Array.prototype.splice"]);
}
- var use_simple_splice = true;
- if (IS_ARRAY(array) &&
- num_elements_to_add !== del_count) {
- // If we are only deleting/moving a few things near the end of the
- // array then the simple version is going to be faster, because it
- // doesn't touch most of the array.
- var estimated_non_hole_elements = %EstimateNumberOfElements(array);
- if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
- use_simple_splice = false;
- }
+ var changed_elements = del_count;
+ if (num_elements_to_add != del_count) {
+  // If the slice actually needs to move elements after the insertion
+  // point, then include those in the estimate of changed elements.
+ changed_elements += len - start_i - del_count;
}
-
- if (use_simple_splice) {
+ if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
+ %NormalizeElements(array);
+ %NormalizeElements(deleted_elements);
+ SmartSlice(array, start_i, del_count, len, deleted_elements);
+ SmartMove(array, start_i, del_count, len, num_elements_to_add);
+ } else {
SimpleSlice(array, start_i, del_count, len, deleted_elements);
SimpleMove(array, start_i, del_count, len, num_elements_to_add);
- } else {
- SmartSlice(array, start_i, del_count, len, deleted_elements);
- SmartMove(array, start_i, del_count, len, num_elements_to_add);
}
// Insert the arguments into the resulting array in
@@ -1283,7 +1290,8 @@
}
var min = index;
var max = length;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
+ if (UseSparseVariant(this, length, IS_ARRAY(this), max - min)) {
+ %NormalizeElements(this);
var indices = %GetArrayKeys(this, length);
if (IS_NUMBER(indices)) {
// It's an interval.
@@ -1338,7 +1346,8 @@
}
var min = 0;
var max = index;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
+ if (UseSparseVariant(this, length, IS_ARRAY(this), index)) {
+ %NormalizeElements(this);
var indices = %GetArrayKeys(this, index + 1);
if (IS_NUMBER(indices)) {
// It's an interval.
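
The rewritten UseSparseVariant above gates the sparse fast paths on three things: the receiver must be a real, unobserved array of at least 1000 elements, the array must be mostly holes, and the operation must touch noticeably more slots than the array actually contains. A minimal standalone model of that decision, written here in C++ with hypothetical names (the real logic lives in array.js and calls runtime helpers such as %EstimateNumberOfElements; the non-Smi length shortcut is omitted):

#include <cstdint>

// Standalone model of the UseSparseVariant decision in array.js above.
// estimated_elements would come from a sampling helper like
// Runtime_EstimateNumberOfElements; the non-Smi length shortcut is omitted.
bool UseSparseVariantSketch(bool is_array, bool is_observed, int64_t length,
                            int64_t estimated_elements, int64_t touched) {
  // Small or observed arrays never take the sparse path.
  if (!is_array || length < 1000 || is_observed) return false;
  // The sparse path pays off only when most of the array is holes...
  int64_t elements_threshold = length >> 2;  // at most ~25% real elements
  // ...and the operation touches many more slots than actually exist.
  return estimated_elements < elements_threshold &&
         touched > estimated_elements * 4;
}

Passing the number of touched elements (the fourth argument added in this change) is what lets join, reverse, slice, splice, indexOf and lastIndexOf share one heuristic instead of each carrying its own threshold.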
=======================================
--- /trunk/src/bootstrapper.cc Tue Jul 22 00:04:43 2014 UTC
+++ /trunk/src/bootstrapper.cc Tue Jul 29 08:45:47 2014 UTC
@@ -355,6 +355,7 @@
Handle<JSGlobalProxy>
global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
+ global_proxy->map()->set_constructor(*factory->null_value());
}
=======================================
--- /trunk/src/counters.cc Mon Jul 7 08:17:56 2014 UTC
+++ /trunk/src/counters.cc Tue Jul 29 08:45:47 2014 UTC
@@ -55,6 +55,11 @@
Counters::Counters(Isolate* isolate) {
+#define HR(name, caption, min, max, num_buckets) \
+ name##_ = Histogram(#caption, min, max, num_buckets, isolate);
+ HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
#define HT(name, caption) \
name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
HISTOGRAM_TIMER_LIST(HT)
@@ -142,6 +147,10 @@
void Counters::ResetHistograms() {
+#define HR(name, caption, min, max, num_buckets) name##_.Reset();
+ HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
#define HT(name, caption) name##_.Reset();
HISTOGRAM_TIMER_LIST(HT)
#undef HT
=======================================
--- /trunk/src/counters.h Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/counters.h Tue Jul 29 08:45:47 2014 UTC
@@ -291,6 +291,9 @@
#endif
};
+#define HISTOGRAM_RANGE_LIST(HR) \
+ /* Generic range histograms */ \
+ HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -552,6 +555,11 @@
// This file contains all the v8 counters that are in use.
class Counters {
public:
+#define HR(name, caption, min, max, num_buckets) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
#define HT(name, caption) \
HistogramTimer* name() { return &name##_; }
HISTOGRAM_TIMER_LIST(HT)
@@ -639,6 +647,10 @@
void ResetHistograms();
private:
+#define HR(name, caption, min, max, num_buckets) Histogram name##_;
+ HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
#define HT(name, caption) \
HistogramTimer name##_;
HISTOGRAM_TIMER_LIST(HT)
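
The new HISTOGRAM_RANGE_LIST entries follow the X-macro pattern already used for the timer histograms: one list macro is expanded three times with different definitions of HR to generate the accessor, the storage member, and the constructor initialization for each counter. A self-contained sketch of that pattern, using a hypothetical list and a simplified Histogram struct rather than the real V8 counters machinery:

#include <cstdio>

// Hypothetical list macro in the same style as HISTOGRAM_RANGE_LIST: each
// entry is expanded several times with different definitions of HR.
#define MY_RANGE_LIST(HR) \
  HR(gc_idle_time_allotted_in_ms, "V8.GCIdleTimeAllottedInMS", 0, 10000, 101)

struct Histogram {
  const char* caption;
  int min;
  int max;
  int buckets;
};

class CountersSketch {
 public:
  // One accessor per list entry.
#define HR(name, caption, min, max, num_buckets) \
  Histogram* name() { return &name##_; }
  MY_RANGE_LIST(HR)
#undef HR

  CountersSketch() {
    // One initialization per list entry.
#define HR(name, caption, min, max, num_buckets) \
  name##_ = Histogram{caption, min, max, num_buckets};
    MY_RANGE_LIST(HR)
#undef HR
  }

 private:
  // One storage member per list entry.
#define HR(name, caption, min, max, num_buckets) Histogram name##_;
  MY_RANGE_LIST(HR)
#undef HR
};

int main() {
  CountersSketch counters;
  std::printf("%s\n", counters.gc_idle_time_allotted_in_ms()->caption);
  return 0;
}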
=======================================
--- /trunk/src/elements.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/elements.cc Tue Jul 29 08:45:47 2014 UTC
@@ -887,8 +887,7 @@
typedef typename KindTraits::BackingStore BackingStore;
-  // Adjusts the length of the fast backing store or returns the new length or
-  // undefined in case conversion to a slow backing store should be performed.
+ // Adjusts the length of the fast backing store.
static Handle<Object> SetLengthWithoutNormalize(
Handle<FixedArrayBase> backing_store,
Handle<JSArray> array,
@@ -940,15 +939,10 @@
// Check whether the backing store should be expanded.
uint32_t min = JSObject::NewElementsCapacity(old_capacity);
uint32_t new_capacity = length > min ? length : min;
- if (!array->ShouldConvertToSlowElements(new_capacity)) {
- FastElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array, new_capacity, length);
- JSObject::ValidateElements(array);
- return length_object;
- }
-
- // Request conversion to slow elements.
- return isolate->factory()->undefined_value();
+ FastElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+ array, new_capacity, length);
+ JSObject::ValidateElements(array);
+ return length_object;
}
static Handle<Object> DeleteCommon(Handle<JSObject> obj,
=======================================
--- /trunk/src/factory.cc Wed Jul 23 00:04:36 2014 UTC
+++ /trunk/src/factory.cc Tue Jul 29 08:45:47 2014 UTC
@@ -1796,8 +1796,8 @@
// Put in filler if the new object is smaller than the old.
if (size_difference > 0) {
- Address address = object->address() + map->instance_size();
- heap->CreateFillerObjectAt(address, size_difference);
+ Address address = object->address();
+    heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
heap->AdjustLiveBytes(address, -size_difference, Heap::FROM_MUTATOR);
}
=======================================
--- /trunk/src/gc-tracer.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/gc-tracer.cc Tue Jul 29 08:45:47 2014 UTC
@@ -32,8 +32,13 @@
end_memory_size(0),
start_holes_size(0),
end_holes_size(0),
+ cumulative_incremental_marking_steps(0),
incremental_marking_steps(0),
- incremental_marking_duration(0.0) {
+ cumulative_incremental_marking_bytes(0),
+ incremental_marking_bytes(0),
+ cumulative_incremental_marking_duration(0.0),
+ incremental_marking_duration(0.0),
+ longest_incremental_marking_step(0.0) {
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
scopes[i] = 0;
}
@@ -67,8 +72,9 @@
GCTracer::GCTracer(Heap* heap)
: heap_(heap),
- incremental_marking_steps_(0),
- incremental_marking_duration_(0.0),
+ cumulative_incremental_marking_steps_(0),
+ cumulative_incremental_marking_bytes_(0),
+ cumulative_incremental_marking_duration_(0.0),
longest_incremental_marking_step_(0.0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
@@ -93,8 +99,12 @@
current_.start_memory_size =
heap_->isolate()->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.incremental_marking_steps = incremental_marking_steps_;
- current_.incremental_marking_duration = incremental_marking_duration_;
+ current_.cumulative_incremental_marking_steps =
+ cumulative_incremental_marking_steps_;
+ current_.cumulative_incremental_marking_bytes =
+ cumulative_incremental_marking_bytes_;
+ current_.cumulative_incremental_marking_duration =
+ cumulative_incremental_marking_duration_;
current_.longest_incremental_marking_step =
longest_incremental_marking_step_;
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
@@ -110,13 +120,29 @@
current_.end_holes_size = CountTotalHolesSize(heap_);
if (current_.type == Event::SCAVENGER) {
+ current_.incremental_marking_steps =
+ current_.cumulative_incremental_marking_steps -
+ previous_.cumulative_incremental_marking_steps;
+ current_.incremental_marking_bytes =
+ current_.cumulative_incremental_marking_bytes -
+ previous_.cumulative_incremental_marking_bytes;
+ current_.incremental_marking_duration =
+ current_.cumulative_incremental_marking_duration -
+ previous_.cumulative_incremental_marking_duration;
scavenger_events_.push_front(current_);
} else {
+ current_.incremental_marking_steps =
+ current_.cumulative_incremental_marking_steps -
+        previous_mark_compactor_event_.cumulative_incremental_marking_steps;
+ current_.incremental_marking_bytes =
+ current_.cumulative_incremental_marking_bytes -
+        previous_mark_compactor_event_.cumulative_incremental_marking_bytes;
+ current_.incremental_marking_duration =
+ current_.cumulative_incremental_marking_duration -
+        previous_mark_compactor_event_.cumulative_incremental_marking_duration;
+ longest_incremental_marking_step_ = 0.0;
mark_compactor_events_.push_front(current_);
}
-
- if (current_.type == Event::MARK_COMPACTOR)
- longest_incremental_marking_step_ = 0.0;
// TODO(ernstm): move the code below out of GCTracer.
@@ -142,9 +168,10 @@
}
-void GCTracer::AddIncrementalMarkingStep(double duration) {
- incremental_marking_steps_++;
- incremental_marking_duration_ += duration;
+void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+ cumulative_incremental_marking_steps_++;
+ cumulative_incremental_marking_bytes_ += bytes;
+ cumulative_incremental_marking_duration_ += duration;
longest_incremental_marking_step_ =
Max(longest_incremental_marking_step_, duration);
}
@@ -165,24 +192,19 @@
double duration = current_.end_time - current_.start_time;
PrintF("%.1f ms", duration);
if (current_.type == Event::SCAVENGER) {
- int steps = current_.incremental_marking_steps -
- previous_.incremental_marking_steps;
- if (steps > 0) {
+ if (current_.incremental_marking_steps > 0) {
PrintF(" (+ %.1f ms in %d steps since last GC)",
- current_.incremental_marking_duration -
- previous_.incremental_marking_duration,
- steps);
+ current_.incremental_marking_duration,
+ current_.incremental_marking_steps);
}
} else {
- int steps = current_.incremental_marking_steps -
- previous_mark_compactor_event_.incremental_marking_steps;
- if (steps > 0) {
+ if (current_.incremental_marking_steps > 0) {
PrintF(
" (+ %.1f ms in %d steps since start of marking, "
"biggest step %.1f ms)",
- current_.incremental_marking_duration -
- previous_mark_compactor_event_.incremental_marking_duration,
- steps, current_.longest_incremental_marking_step);
+ current_.incremental_marking_duration,
+ current_.incremental_marking_steps,
+ current_.longest_incremental_marking_step);
}
}
@@ -252,18 +274,14 @@
PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
if (current_.type == Event::SCAVENGER) {
- PrintF("stepscount=%d ", current_.incremental_marking_steps -
- previous_.incremental_marking_steps);
- PrintF("stepstook=%.1f ", current_.incremental_marking_duration -
- previous_.incremental_marking_duration);
+ PrintF("steps_count=%d ", current_.incremental_marking_steps);
+ PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
} else {
- PrintF("stepscount=%d ",
- current_.incremental_marking_steps -
- previous_mark_compactor_event_.incremental_marking_steps);
- PrintF("stepstook=%.1f ",
- current_.incremental_marking_duration -
-           previous_mark_compactor_event_.incremental_marking_duration);
- PrintF("longeststep=%.1f ", current_.longest_incremental_marking_step);
+ PrintF("steps_count=%d ", current_.incremental_marking_steps);
+ PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+ PrintF("longest_step=%.1f ",
current_.longest_incremental_marking_step);
+ PrintF("marking_throughput=%" V8_PTR_PREFIX "d ",
+ MarkingSpeedInBytesPerMillisecond());
}
PrintF("\n");
@@ -299,19 +317,66 @@
double GCTracer::MeanIncrementalMarkingDuration() const {
- if (mark_compactor_events_.empty()) return 0.0;
+ if (cumulative_incremental_marking_steps_ == 0) return 0.0;
+
+ // We haven't completed an entire round of incremental marking, yet.
+ // Use data from GCTracer instead of data from event buffers.
+ if (mark_compactor_events_.empty()) {
+ return cumulative_incremental_marking_duration_ /
+ cumulative_incremental_marking_steps_;
+ }
+
+ int steps = 0;
+ double durations = 0.0;
+ EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+ while (iter != mark_compactor_events_.end()) {
+ steps += iter->incremental_marking_steps;
+ durations += iter->incremental_marking_duration;
+ ++iter;
+ }
+
+ if (steps == 0) return 0.0;
- EventBuffer::const_iterator last_mc = mark_compactor_events_.begin();
- return last_mc->incremental_marking_duration /
- last_mc->incremental_marking_steps;
+ return durations / steps;
}
double GCTracer::MaxIncrementalMarkingDuration() const {
- if (mark_compactor_events_.empty()) return 0.0;
+ // We haven't completed an entire round of incremental marking, yet.
+ // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) return longest_incremental_marking_step_;
+
+ double max_duration = 0.0;
+ EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+ while (iter != mark_compactor_events_.end())
+    max_duration = Max(iter->longest_incremental_marking_step, max_duration);
+
+ return max_duration;
+}
+
+
+intptr_t GCTracer::MarkingSpeedInBytesPerMillisecond() const {
+ if (cumulative_incremental_marking_duration_ == 0.0) return 0;
+
+ // We haven't completed an entire round of incremental marking, yet.
+ // Use data from GCTracer instead of data from event buffers.
+ if (mark_compactor_events_.empty()) {
+ return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
+ cumulative_incremental_marking_duration_);
+ }
+
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+ while (iter != mark_compactor_events_.end()) {
+ bytes += iter->incremental_marking_bytes;
+ durations += iter->incremental_marking_duration;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
- EventBuffer::const_iterator last_mc = mark_compactor_events_.begin();
- return last_mc->longest_incremental_marking_step;
+ return static_cast<intptr_t>(bytes / durations);
}
}
} // namespace v8::internal
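
MarkingSpeedInBytesPerMillisecond above divides the incremental-marking bytes by the incremental-marking time, summed over the recorded mark-compactor events, and falls back to the cumulative counters when no full marking round has been recorded yet. A small standalone sketch of that calculation, with the event fields modelled by a hypothetical struct rather than the real GCTracer::Event:

#include <cstdint>
#include <deque>

// Per-GC record mirroring the fields added to GCTracer::Event above
// (illustrative names, not the V8 API).
struct MarkCompactRecord {
  intptr_t incremental_marking_bytes;
  double incremental_marking_duration;  // milliseconds
};

// Average marking speed in bytes per millisecond over the recorded events,
// falling back to the cumulative totals when no full round exists yet.
intptr_t MarkingSpeedSketch(const std::deque<MarkCompactRecord>& events,
                            intptr_t cumulative_bytes,
                            double cumulative_duration_ms) {
  if (cumulative_duration_ms == 0.0) return 0;
  if (events.empty()) {
    return static_cast<intptr_t>(cumulative_bytes / cumulative_duration_ms);
  }
  intptr_t bytes = 0;
  double durations = 0.0;
  for (const MarkCompactRecord& e : events) {
    bytes += e.incremental_marking_bytes;
    durations += e.incremental_marking_duration;
  }
  if (durations == 0.0) return 0;
  return static_cast<intptr_t>(bytes / durations);
}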
=======================================
--- /trunk/src/gc-tracer.h Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/gc-tracer.h Tue Jul 29 08:45:47 2014 UTC
@@ -172,10 +172,28 @@
// Number of incremental marking steps since creation of tracer.
// (value at start of event)
+ int cumulative_incremental_marking_steps;
+
+ // Incremental marking steps since
+ // - last event for SCAVENGER events
+ // - last MARK_COMPACTOR event for MARK_COMPACTOR events
int incremental_marking_steps;
+ // Bytes marked since creation of tracer (value at start of event).
+ intptr_t cumulative_incremental_marking_bytes;
+
+ // Bytes marked since
+ // - last event for SCAVENGER events
+ // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+ intptr_t incremental_marking_bytes;
+
// Cumulative duration of incremental marking steps since creation of
// tracer. (value at start of event)
+ double cumulative_incremental_marking_duration;
+
+ // Duration of incremental marking steps since
+ // - last event for SCAVENGER events
+ // - last MARK_COMPACTOR event for MARK_COMPACTOR events
double incremental_marking_duration;
// Longest incremental marking step since start of marking.
@@ -200,7 +218,7 @@
void Stop();
// Log an incremental marking step.
- void AddIncrementalMarkingStep(double duration);
+ void AddIncrementalMarkingStep(double duration, intptr_t bytes);
  // Compute the mean duration of the last scavenger events. Returns 0 if no
// events have been recorded.
@@ -232,6 +250,10 @@
// Returns 0 if no incremental marking round has been completed.
double MaxIncrementalMarkingDuration() const;
+  // Compute the average incremental marking speed in bytes/second. Returns 0 if
+ // no events have been recorded.
+ intptr_t MarkingSpeedInBytesPerMillisecond() const;
+
private:
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -267,10 +289,14 @@
EventBuffer mark_compactor_events_;
  // Cumulative number of incremental marking steps since creation of tracer.
- int incremental_marking_steps_;
+ int cumulative_incremental_marking_steps_;
+
+  // Cumulative size of incremental marking steps (in bytes) since creation of
+ // tracer.
+ intptr_t cumulative_incremental_marking_bytes_;
  // Cumulative duration of incremental marking steps since creation of tracer.
- double incremental_marking_duration_;
+ double cumulative_incremental_marking_duration_;
// Longest incremental marking step since start of marking.
double longest_incremental_marking_step_;
=======================================
--- /trunk/src/globals.h Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/src/globals.h Tue Jul 29 08:45:47 2014 UTC
@@ -216,11 +216,15 @@
const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-// Tag information for Failure.
-// TODO(yangguo): remove this from space owner calculation.
-const int kFailureTag = 3;
-const int kFailureTagSize = 2;
-const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
+// The owner field of a page is tagged with the page header tag. We need that
+// to find out if a slot is part of a large object. If we mask out the lower
+// 0xfffff bits (1M pages), go to the owner offset, and see that this field
+// is tagged with the page header tag, we can just look up the owner.
+// Otherwise, we know that we are somewhere (not within the first 1M) in a
+// large object.
+const int kPageHeaderTag = 3;
+const int kPageHeaderTagSize = 2;
+const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
// Zap-value: The value used for zapping dead objects.
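
The renamed constants describe a small pointer-tagging scheme: the Space* stored in a page header has kPageHeaderTag added to its low bits, so a reader can mask with kPageHeaderTagMask to tell a genuine owner field apart from arbitrary data inside a large object. A minimal sketch of the tag/untag operations, using the same constants but a stand-in Space type (compare MemoryChunk::set_owner and MemoryChunk::owner in spaces.h below):

#include <cstdint>

// Same tag constants as above.
const int kPageHeaderTag = 3;
const int kPageHeaderTagSize = 2;
const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;

struct Space {};  // stand-in for v8::internal::Space

// Tag a (suitably aligned) Space pointer before storing it in the page
// header, mirroring MemoryChunk::set_owner.
inline intptr_t TagOwner(Space* space) {
  intptr_t raw = reinterpret_cast<intptr_t>(space);
  // The pointer's low kPageHeaderTagSize bits must be free for the tag.
  return raw + kPageHeaderTag;
}

// Recover the owner, or return nullptr when the field is not a tagged owner
// (e.g. because we are inside a large object), mirroring MemoryChunk::owner.
inline Space* UntagOwner(intptr_t stored) {
  if ((stored & kPageHeaderTagMask) == kPageHeaderTag) {
    return reinterpret_cast<Space*>(stored - kPageHeaderTag);
  }
  return nullptr;
}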
=======================================
--- /trunk/src/heap.cc Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/heap.cc Tue Jul 29 08:45:47 2014 UTC
@@ -54,8 +54,7 @@
isolate_(NULL),
code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
- // be
- // a multiple of Page::kPageSize.
+ // be a multiple of Page::kPageSize.
reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
@@ -3303,8 +3302,7 @@
// for concurrent sweeping. The WasSwept predicate for concurrently swept
// pages is set after sweeping all pages.
return (!is_in_old_pointer_space && !is_in_old_data_space) ||
- page->WasSwept() ||
- (page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE);
+ page->WasSwept() || page->SweepingCompleted();
}
@@ -4229,9 +4227,6 @@
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- HistogramTimerScope idle_notification_scope(
- isolate_->counters()->gc_incremental_marking());
-
incremental_marking()->Step(step_size,
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
@@ -4255,6 +4250,9 @@
bool Heap::IdleNotification(int hint) {
+ // If incremental marking is off, we do not perform idle notification.
+ if (!FLAG_incremental_marking) return true;
+
// Hints greater than this value indicate that
// the embedder is requesting a lot of GC work.
const int kMaxHint = 1000;
@@ -4268,6 +4266,7 @@
intptr_t step_size =
size_factor * IncrementalMarking::kAllocatedThreshold;
+ isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
@@ -4289,10 +4288,6 @@
StartIdleRound();
return false;
}
-
- if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
- return IdleGlobalGC();
- }
// By doing small chunks of GC work in each IdleNotification,
// perform a round of incremental GCs and after that wait until
@@ -4346,66 +4341,6 @@
return false;
}
-
-
-bool Heap::IdleGlobalGC() {
- static const int kIdlesBeforeScavenge = 4;
- static const int kIdlesBeforeMarkSweep = 7;
- static const int kIdlesBeforeMarkCompact = 8;
- static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
- static const unsigned int kGCsBetweenCleanup = 4;
-
- if (!last_idle_notification_gc_count_init_) {
- last_idle_notification_gc_count_ = gc_count_;
- last_idle_notification_gc_count_init_ = true;
- }
-
- bool uncommit = true;
- bool finished = false;
-
- // Reset the number of idle notifications received when a number of
- // GCs have taken place. This allows another round of cleanup based
- // on idle notifications if enough work has been carried out to
- // provoke a number of garbage collections.
- if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
- number_idle_notifications_ =
- Min(number_idle_notifications_ + 1, kMaxIdleCount);
- } else {
- number_idle_notifications_ = 0;
- last_idle_notification_gc_count_ = gc_count_;
- }
-
- if (number_idle_notifications_ == kIdlesBeforeScavenge) {
- CollectGarbage(NEW_SPACE, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
- // Before doing the mark-sweep collections we clear the
- // compilation cache to avoid hanging on to source code and
- // generated code for cached functions.
- isolate_->compilation_cache()->Clear();
-
- CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
-
- } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- number_idle_notifications_ = 0;
- finished = true;
- } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
- // If we have received more than kIdlesBeforeMarkCompact idle
- // notifications we do not perform any cleanup because we don't
- // expect to gain much by doing so.
- finished = true;
- }
-
- if (uncommit) UncommitFromSpace();
-
- return finished;
-}
#ifdef DEBUG
=======================================
--- /trunk/src/heap.h Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/heap.h Tue Jul 29 08:45:47 2014 UTC
@@ -2077,9 +2077,6 @@
int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
return heap_size_mb / kMbPerMs;
}
-
- // Returns true if no more GC work is left.
- bool IdleGlobalGC();
void AdvanceIdleIncrementalMarking(intptr_t step_size);
=======================================
--- /trunk/src/ic.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/ic.cc Tue Jul 29 08:45:47 2014 UTC
@@ -941,8 +941,7 @@
Handle<HeapType> type = receiver_type();
Handle<JSObject> holder(lookup->holder());
bool receiver_is_holder = object.is_identical_to(holder);
-  NamedLoadHandlerCompiler compiler(isolate(), handler_kind(), kNoExtraICState,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), cache_holder);
switch (lookup->type()) {
case FIELD: {
@@ -1392,7 +1391,7 @@
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<JSObject> holder(lookup->holder());
- NamedStoreHandlerCompiler compiler(isolate(), kind());
+ NamedStoreHandlerCompiler compiler(isolate());
if (lookup->IsTransition()) {
// Explicitly pass in the receiver map since LookupForWrite may have
=======================================
--- /trunk/src/incremental-marking.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/incremental-marking.cc Tue Jul 29 08:45:47 2014 UTC
@@ -853,104 +853,112 @@
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
-  // The marking speed is driven either by the allocation rate or by the rate
-  // at which we are having to check the color of objects in the write barrier.
-  // It is possible for a tight non-allocating loop to run a lot of write
-  // barriers before we get here and check them (marking can only take place on
-  // allocation), so to reduce the lumpiness we don't use the write barriers
-  // invoked since last step directly to determine the amount of work to do.
-  intptr_t bytes_to_process =
-      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
- allocated_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
+ {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ double start = base::OS::TimeCurrentMillis();
- bytes_scanned_ += bytes_to_process;
+    // The marking speed is driven either by the allocation rate or by the rate
+    // at which we are having to check the color of objects in the write
+    // barrier.
+    // It is possible for a tight non-allocating loop to run a lot of write
+    // barriers before we get here and check them (marking can only take place
+    // on
+    // allocation), so to reduce the lumpiness we don't use the write barriers
+    // invoked since last step directly to determine the amount of work to do.
+ intptr_t bytes_to_process =
+ marking_speed_ *
+ Max(allocated_, write_barriers_invoked_since_last_step_);
+ allocated_ = 0;
+ write_barriers_invoked_since_last_step_ = 0;
- double start = base::OS::TimeCurrentMillis();
+ bytes_scanned_ += bytes_to_process;
- if (state_ == SWEEPING) {
- if (heap_->mark_compact_collector()->sweeping_in_progress() &&
- heap_->mark_compact_collector()->IsSweepingCompleted()) {
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ if (state_ == SWEEPING) {
+ if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+ heap_->mark_compact_collector()->IsSweepingCompleted()) {
+ heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ }
+ if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ bytes_scanned_ = 0;
+ StartMarking(PREVENT_COMPACTION);
+ }
+ } else if (state_ == MARKING) {
+ ProcessMarkingDeque(bytes_to_process);
+ if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- bytes_scanned_ = 0;
- StartMarking(PREVENT_COMPACTION);
- }
- } else if (state_ == MARKING) {
- ProcessMarkingDeque(bytes_to_process);
- if (marking_deque_.IsEmpty()) MarkingComplete(action);
- }
- steps_count_++;
+ steps_count_++;
- bool speed_up = false;
+ bool speed_up = false;
- if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking after %d steps\n",
- static_cast<int>(kMarkingSpeedAccellerationInterval));
+ if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+ if (FLAG_trace_gc) {
+ PrintPID("Speed up marking after %d steps\n",
+ static_cast<int>(kMarkingSpeedAccellerationInterval));
+ }
+ speed_up = true;
}
- speed_up = true;
- }
- bool space_left_is_very_small =
- (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+ bool space_left_is_very_small =
+        (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
- bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
- old_generation_space_available_at_start_of_incremental_);
-
- if (space_left_is_very_small ||
- only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_gc) PrintPID("Speed up marking because of low space
left\n");
- speed_up = true;
- }
+ bool only_1_nth_of_space_that_was_available_still_left =
+ (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+ old_generation_space_available_at_start_of_incremental_);
- bool size_of_old_space_multiplied_by_n_during_marking =
- (heap_->PromotedTotalSize() >
- (marking_speed_ + 1) *
- old_generation_space_used_at_start_of_incremental_);
- if (size_of_old_space_multiplied_by_n_during_marking) {
- speed_up = true;
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because of heap size increase\n");
+ if (space_left_is_very_small ||
+ only_1_nth_of_space_that_was_available_still_left) {
+ if (FLAG_trace_gc)
+ PrintPID("Speed up marking because of low space left\n");
+ speed_up = true;
}
- }
-
- int64_t promoted_during_marking = heap_->PromotedTotalSize()
- - old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = marking_speed_ * MB;
- intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
- // We try to scan at at least twice the speed that we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because marker was not keeping up\n");
+ bool size_of_old_space_multiplied_by_n_during_marking =
+ (heap_->PromotedTotalSize() >
+ (marking_speed_ + 1) *
+ old_generation_space_used_at_start_of_incremental_);
+ if (size_of_old_space_multiplied_by_n_during_marking) {
+ speed_up = true;
+ if (FLAG_trace_gc) {
+ PrintPID("Speed up marking because of heap size increase\n");
+ }
}
- speed_up = true;
- }
+
+ int64_t promoted_during_marking =
+ heap_->PromotedTotalSize() -
+ old_generation_space_used_at_start_of_incremental_;
+ intptr_t delay = marking_speed_ * MB;
+ intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
- if (speed_up) {
- if (state_ != MARKING) {
+ // We try to scan at at least twice the speed that we are allocating.
+    if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
if (FLAG_trace_gc) {
- PrintPID("Postponing speeding up marking until marking starts\n");
+ PrintPID("Speed up marking because marker was not keeping up\n");
}
- } else {
- marking_speed_ += kMarkingSpeedAccelleration;
- marking_speed_ = static_cast<int>(
- Min(kMaxMarkingSpeed,
- static_cast<intptr_t>(marking_speed_ * 1.3)));
- if (FLAG_trace_gc) {
- PrintPID("Marking speed increased to %d\n", marking_speed_);
+ speed_up = true;
+ }
+
+ if (speed_up) {
+ if (state_ != MARKING) {
+ if (FLAG_trace_gc) {
+ PrintPID("Postponing speeding up marking until marking
starts\n");
+ }
+ } else {
+ marking_speed_ += kMarkingSpeedAccelleration;
+ marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+ if (FLAG_trace_gc) {
+ PrintPID("Marking speed increased to %d\n", marking_speed_);
+ }
}
}
+
+ double end = base::OS::TimeCurrentMillis();
+ double duration = (end - start);
+ heap_->tracer()->AddIncrementalMarkingStep(duration, allocated_bytes);
+ heap_->AddMarkingTime(duration);
}
-
- double end = base::OS::TimeCurrentMillis();
- double delta = (end - start);
- heap_->tracer()->AddIncrementalMarkingStep(delta);
- heap_->AddMarkingTime(delta);
}
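
The restructured Step above keeps two decisions that the new histogram timer scope now wraps: how many bytes to mark in this step (the marking speed times the larger of bytes allocated and write barriers invoked since the last step), and whether to raise the marking speed (low space left, only 1/n of the initial space remaining, the heap growing n-fold during marking, or the marker falling behind allocation). A condensed standalone sketch of those two decisions, with all inputs passed explicitly and the periodic every-N-steps speed-up omitted; the struct and names are illustrative, not the V8 API:

#include <algorithm>
#include <cstdint>

const intptr_t MB = 1024 * 1024;

// Inputs for one marking step, passed explicitly for the sketch; in the
// real code these are members of IncrementalMarking and Heap.
struct StepInputsSketch {
  int marking_speed;
  intptr_t allocated;
  intptr_t write_barriers_invoked;
  intptr_t space_left_in_old_space;
  intptr_t space_available_at_start;
  intptr_t space_used_at_start;
  intptr_t promoted_total_size;
  intptr_t bytes_scanned;
  intptr_t max_semi_space_size;
};

// How much marking work to do this step: driven by whichever of the
// allocation rate and the write-barrier rate is larger.
intptr_t BytesToProcessSketch(const StepInputsSketch& in) {
  return in.marking_speed * std::max(in.allocated, in.write_barriers_invoked);
}

// Should the marking speed be increased? Mirrors the conditions checked in
// Step() above (the periodic every-N-steps speed-up is omitted).
bool ShouldSpeedUpSketch(const StepInputsSketch& in) {
  bool space_left_is_very_small = in.space_left_in_old_space < 10 * MB;
  bool only_1_nth_of_space_left =
      in.space_left_in_old_space * (in.marking_speed + 1) <
      in.space_available_at_start;
  bool heap_grew_n_fold_during_marking =
      in.promoted_total_size >
      (in.marking_speed + 1) * in.space_used_at_start;
  intptr_t promoted_during_marking =
      in.promoted_total_size - in.space_used_at_start;
  intptr_t delay = static_cast<intptr_t>(in.marking_speed) * MB;
  // Try to scan at least twice as fast as the mutator promotes objects.
  bool marker_not_keeping_up =
      promoted_during_marking >
      in.bytes_scanned / 2 + in.max_semi_space_size + delay;
  return space_left_is_very_small || only_1_nth_of_space_left ||
         heap_grew_n_fold_during_marking || marker_not_keeping_up;
}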
=======================================
--- /trunk/src/list-inl.h Tue Jul 1 11:58:10 2014 UTC
+++ /trunk/src/list-inl.h Tue Jul 29 08:45:47 2014 UTC
@@ -220,7 +220,7 @@
int low = 0;
int high = list.length() - 1;
while (low <= high) {
- int mid = low + (high - low) / 2;
+ int mid = (low + high) / 2;
T mid_elem = list[mid];
if (cmp(&mid_elem) > 0) {
=======================================
--- /trunk/src/lithium.cc Thu Jul 10 00:04:42 2014 UTC
+++ /trunk/src/lithium.cc Tue Jul 29 08:45:47 2014 UTC
@@ -438,6 +438,8 @@
LOG_CODE_EVENT(info()->isolate(),
CodeStartLinePosInfoRecordEvent(
assembler.positions_recorder()));
+ // TODO(yangguo) remove this once the code serializer handles code stubs.
+ if (info()->will_serialize()) assembler.enable_serializer();
LCodeGen generator(this, &assembler, info());
MarkEmptyBlocks();
=======================================
--- /trunk/src/objects-inl.h Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/objects-inl.h Tue Jul 29 08:45:47 2014 UTC
@@ -2540,6 +2540,7 @@
void ConstantPoolArray::set(int index, Object* value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(!GetHeap()->InNewSpace(value));
ASSERT(get_type(index) == HEAP_PTR);
WRITE_FIELD(this, OffsetOfElementAt(index), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
@@ -2584,6 +2585,7 @@
void ConstantPoolArray::set_at_offset(int offset, Object* value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(!GetHeap()->InNewSpace(value));
ASSERT(offset_is_type(offset, HEAP_PTR));
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
=======================================
--- /trunk/src/objects.cc Mon Jul 28 12:25:10 2014 UTC
+++ /trunk/src/objects.cc Tue Jul 29 08:45:47 2014 UTC
@@ -4584,12 +4584,14 @@
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Heap* heap = isolate->heap();
- heap->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
- heap->AdjustLiveBytes(object->address(),
- -instance_size_delta,
- Heap::FROM_MUTATOR);
+
+ if (instance_size_delta > 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ heap->AdjustLiveBytes(object->address(), -instance_size_delta,
+ Heap::FROM_MUTATOR);
+ }
  // We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -6636,7 +6638,12 @@
if (is_observed) {
if (is_element) {
Maybe<bool> maybe = HasOwnElement(object, index);
- ASSERT(maybe.has_value);
+ // Workaround for a GCC 4.4.3 bug which leads to "'preexists' may be
used
+ // uninitialized in this function".
+ if (!maybe.has_value) {
+ ASSERT(false);
+ return isolate->factory()->undefined_value();
+ }
preexists = maybe.value;
      if (preexists && GetOwnElementAccessorPair(object, index).is_null()) {
old_value =
@@ -11733,6 +11740,17 @@
MaybeHandle<Object> JSArray::SetElementsLength(
Handle<JSArray> array,
Handle<Object> new_length_handle) {
+ if (array->HasFastElements()) {
+    // If the new array won't fit in some non-trivial fraction of the max old
+ // space size, then force it to go dictionary mode.
+ int max_fast_array_size = static_cast<int>(
+ (array->GetHeap()->MaxOldGenerationSize() / kDoubleSize) / 4);
+ if (new_length_handle->IsNumber() &&
+ NumberToInt32(*new_length_handle) >= max_fast_array_size) {
+ NormalizeElements(array);
+ }
+ }
+
// We should never end in here with a pixel or external array.
ASSERT(array->AllowsSetElementsLength());
if (!array->map()->is_observed()) {
=======================================
--- /trunk/src/runtime.cc Mon Jul 28 12:25:10 2014 UTC
+++ /trunk/src/runtime.cc Tue Jul 29 08:45:47 2014 UTC
@@ -10574,15 +10574,39 @@
// How many elements does this object/array have?
RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSArray, object, 0);
- HeapObject* elements = object->elements();
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ Handle<FixedArrayBase> elements(array->elements(), isolate);
+ SealHandleScope shs(isolate);
if (elements->IsDictionary()) {
-    int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
+ int result =
+ Handle<SeededNumberDictionary>::cast(elements)->NumberOfElements();
return Smi::FromInt(result);
} else {
- return object->length();
+ ASSERT(array->length()->IsSmi());
+ // For packed elements, we know the exact number of elements
+ int length = elements->length();
+ ElementsKind kind = array->GetElementsKind();
+ if (IsFastPackedElementsKind(kind)) {
+ return Smi::FromInt(length);
+ }
+ // For holey elements, take samples from the buffer checking for holes
+ // to generate the estimate.
+ const int kNumberOfHoleCheckSamples = 97;
+ int increment = (length < kNumberOfHoleCheckSamples)
+ ? 1
+                        : static_cast<int>(length / kNumberOfHoleCheckSamples);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int holes = 0;
+ for (int i = 0; i < length; i += increment) {
+ if (!accessor->HasElement(array, array, i, elements)) {
+ ++holes;
+ }
+ }
+ int estimate = static_cast<int>((kNumberOfHoleCheckSamples - holes) /
+ kNumberOfHoleCheckSamples * length);
+ return Smi::FromInt(estimate);
}
}
@@ -14954,6 +14978,15 @@
Handle<AllocationSite>::null(),
caller_args);
}
+
+
+RUNTIME_FUNCTION(Runtime_NormalizeElements) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
+ JSObject::NormalizeElements(array);
+ return *array;
+}
RUNTIME_FUNCTION(Runtime_MaxSmi) {
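
For holey fast arrays, the new Runtime_EstimateNumberOfElements above no longer returns the raw length: it probes roughly 97 evenly spaced indices, counts holes, and scales the observed non-hole fraction back up to the full length. A small standalone sketch of that sampling estimate; has_element stands in for ElementsAccessor::HasElement, and unlike the code above the fraction here is computed in floating point over the samples actually taken:

#include <functional>

// Sketch of the hole-sampling estimate in Runtime_EstimateNumberOfElements
// above; has_element stands in for ElementsAccessor::HasElement.
int EstimateElementsSketch(int length,
                           const std::function<bool(int)>& has_element) {
  const int kNumberOfHoleCheckSamples = 97;
  int increment = (length < kNumberOfHoleCheckSamples)
                      ? 1
                      : length / kNumberOfHoleCheckSamples;
  int samples = 0;
  int holes = 0;
  for (int i = 0; i < length; i += increment) {
    ++samples;
    if (!has_element(i)) ++holes;
  }
  if (samples == 0) return 0;
  // Scale the observed non-hole fraction back up to the full length,
  // in floating point to avoid integer-division truncation.
  double non_hole_fraction = static_cast<double>(samples - holes) / samples;
  return static_cast<int>(non_hole_fraction * length);
}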
=======================================
--- /trunk/src/runtime.h Thu Jul 24 00:04:58 2014 UTC
+++ /trunk/src/runtime.h Tue Jul 29 08:45:47 2014 UTC
@@ -238,6 +238,7 @@
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
+ F(NormalizeElements, 1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
=======================================
--- /trunk/src/spaces.cc Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/src/spaces.cc Tue Jul 29 08:45:47 2014 UTC
@@ -58,7 +58,7 @@
page->area_end(),
kOnePageOnly,
size_func);
- ASSERT(page->WasSweptPrecisely());
+ ASSERT(page->WasSweptPrecisely() || page->SweepingCompleted());
}
=======================================
--- /trunk/src/spaces.h Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/src/spaces.h Tue Jul 29 08:45:47 2014 UTC
@@ -312,20 +312,20 @@
}
Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag) {
+ if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag) {
return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kFailureTag);
+ kPageHeaderTag);
} else {
return NULL;
}
}
void set_owner(Space* space) {
- ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kFailureTag;
- ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag);
+ ASSERT((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+ owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+ ASSERT((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag);
}
base::VirtualMemory* reserved_memory() {
@@ -477,6 +477,8 @@
        &parallel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
SWEEPING_PENDING;
}
+
+  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
=======================================
--- /trunk/src/store-buffer.cc Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/store-buffer.cc Tue Jul 29 08:45:47 2014 UTC
@@ -505,9 +505,9 @@
}
}
} else {
-            if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+ if (!page->SweepingCompleted()) {
heap_->mark_compact_collector()->SweepInParallel(page,
owner);
-              if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+ if (!page->SweepingCompleted()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page.
// TODO(hpayer): This may introduce a huge pause here. We
=======================================
--- /trunk/src/stub-cache.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/stub-cache.cc Tue Jul 29 08:45:47 2014 UTC
@@ -195,13 +195,11 @@
}
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
- Code::Kind handler_kind = Code::LOAD_IC;
Handle<Code> handler = PropertyHandlerCompiler::Find(
- cache_name, stub_holder_map, handler_kind, flag, Code::FAST);
+ cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
if (!handler.is_null()) return handler;
-  NamedLoadHandlerCompiler compiler(isolate_, handler_kind, kNoExtraICState,
- flag);
+ NamedLoadHandlerCompiler compiler(isolate_, flag);
handler = compiler.CompileLoadNonexistent(type, last, cache_name);
Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
return handler;
@@ -1137,7 +1135,7 @@
Handle<Name> name,
InlineCacheState state) {
Code::Flags flags =
- Code::ComputeFlags(kind, state, extra_state(), type, cache_holder());
+      Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
IC::RegisterWeakMapDependency(code);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
@@ -1148,7 +1146,6 @@
Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- ASSERT_EQ(kNoExtraICState, extra_state());
  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
=======================================
--- /trunk/src/stub-cache.h Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/src/stub-cache.h Tue Jul 29 08:45:47 2014 UTC
@@ -289,18 +289,15 @@
protected:
PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state,
CacheHolderFlag cache_holder)
: registers_(GetCallingConvention(kind)),
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
- extra_ic_state_(extra_ic_state),
masm_(isolate, NULL, 256) {}
Code::Kind kind() const { return kind_; }
CacheHolderFlag cache_holder() const { return cache_holder_; }
- ExtraICState extra_state() const { return extra_ic_state_; }
MacroAssembler* masm() { return &masm_; }
Isolate* isolate() const { return isolate_; }
Heap* heap() const { return isolate()->heap(); }
@@ -332,7 +329,6 @@
CacheHolderFlag cache_holder_;
Isolate* isolate_;
- const ExtraICState extra_ic_state_;
MacroAssembler masm_;
};
@@ -342,11 +338,12 @@
PropertyICCompiler(Isolate* isolate, Code::Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
CacheHolderFlag cache_holder = kCacheOnReceiver)
-      : PropertyAccessCompiler(isolate, kind, extra_ic_state, cache_holder) {}
+ : PropertyAccessCompiler(isolate, kind, cache_holder),
+ extra_ic_state_(extra_ic_state) {}
static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
Code::Kind kind,
- ExtraICState extra_state = kNoExtraICState,
+ ExtraICState extra_ic_state = kNoExtraICState,
CacheHolderFlag cache_holder =
kCacheOnReceiver);
Handle<Code> CompileLoadInitialize(Code::Flags flags);
@@ -397,6 +394,7 @@
  Handle<Code> CompileIndexedStorePolymorphic(MapHandleList* receiver_maps,
                                              CodeHandleList* handler_stubs,
                                              MapHandleList* transitioned_maps);
+ const ExtraICState extra_ic_state_;
};
@@ -407,9 +405,8 @@
protected:
PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state,
CacheHolderFlag cache_holder)
-      : PropertyAccessCompiler(isolate, kind, extra_ic_state, cache_holder) {}
+ : PropertyAccessCompiler(isolate, kind, cache_holder) {}
virtual ~PropertyHandlerCompiler() {}
@@ -481,10 +478,9 @@
class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
public:
-  NamedLoadHandlerCompiler(Isolate* isolate, Code::Kind kind = Code::LOAD_IC,
- ExtraICState extra_ic_state = kNoExtraICState,
+ NamedLoadHandlerCompiler(Isolate* isolate,
CacheHolderFlag cache_holder = kCacheOnReceiver)
-      : PropertyHandlerCompiler(isolate, kind, extra_ic_state, cache_holder) {}
+ : PropertyHandlerCompiler(isolate, Code::LOAD_IC, cache_holder) {}
virtual ~NamedLoadHandlerCompiler() {}
@@ -594,10 +590,8 @@
class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
public:
-  NamedStoreHandlerCompiler(Isolate* isolate, Code::Kind kind = Code::STORE_IC)
- // Handlers do not use strict mode.
- : PropertyHandlerCompiler(isolate, kind, kNoExtraICState,
- kCacheOnReceiver) {}
+ explicit NamedStoreHandlerCompiler(Isolate* isolate)
+      : PropertyHandlerCompiler(isolate, Code::STORE_IC, kCacheOnReceiver) {}
virtual ~NamedStoreHandlerCompiler() {}
@@ -696,9 +690,8 @@
class IndexedHandlerCompiler : public PropertyHandlerCompiler {
public:
- IndexedHandlerCompiler(Isolate* isolate,
- ExtraICState extra_ic_state = kNoExtraICState)
-      : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC, extra_ic_state,
+ explicit IndexedHandlerCompiler(Isolate* isolate)
+      : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC, kCacheOnReceiver) {}
virtual ~IndexedHandlerCompiler() {}
=======================================
--- /trunk/src/version.cc Mon Jul 28 12:25:10 2014 UTC
+++ /trunk/src/version.cc Tue Jul 29 08:45:47 2014 UTC
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 28
-#define BUILD_NUMBER 43
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 45
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
=======================================
--- /trunk/src/x87/code-stubs-x87.cc Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/src/x87/code-stubs-x87.cc Tue Jul 29 08:45:47 2014 UTC
@@ -558,22 +558,14 @@
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
- __ j(not_equal, &miss);
- }
+ Register receiver = LoadIC::ReceiverRegister();
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+ ebx, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
=======================================
--- /trunk/src/x87/stub-cache-x87.cc Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/src/x87/stub-cache-x87.cc Tue Jul 29 08:45:47 2014 UTC
@@ -114,12 +114,9 @@
}
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register scratch0,
- Register scratch1) {
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
ASSERT(name->IsUniqueName());
ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
@@ -233,21 +230,8 @@
}
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
-                                                        Register prototype) {
- __ LoadGlobalFunction(index, prototype);
- __ LoadGlobalFunctionInitialMap(prototype, prototype);
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(masm->isolate()->native_context()->get(index)));
@@ -266,50 +250,13 @@
}
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index,
-                                            Representation representation) {
- ASSERT(!representation.IsDouble());
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ mov(dst, FieldOperand(src, offset));
-}
static void PushInterceptorArguments(MacroAssembler* masm,
@@ -351,14 +298,10 @@
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should will be removed
// when api call ICs are generated in hydrogen.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- bool is_store,
- int argc,
- Register* values) {
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, int argc, Register* values) {
// Copy return value.
__ pop(scratch_in);
// receiver
@@ -428,9 +371,9 @@
}
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
@@ -441,11 +384,9 @@
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
Handle<PropertyCell> cell =
JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
@@ -461,12 +402,9 @@
}
-void StoreStubCompiler::GenerateNegativeHolderLookup(
- MacroAssembler* masm,
- Handle<JSObject> holder,
- Register holder_reg,
- Handle<Name> name,
- Label* miss) {
+void NamedStoreHandlerCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm, Handle<JSObject> holder, Register holder_reg,
+ Handle<Name> name, Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(),
miss);
@@ -479,19 +417,11 @@
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ MacroAssembler* masm, Handle<JSObject> object, LookupResult* lookup,
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register unused, Label* miss_label, Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -647,15 +577,10 @@
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void NamedStoreHandlerCompiler::GenerateStoreField(
+ MacroAssembler* masm, Handle<JSObject> object, LookupResult* lookup,
+ Register receiver_reg, Register name_reg, Register value_reg,
+ Register scratch1, Register scratch2, Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -757,7 +682,8 @@
}
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -766,15 +692,10 @@
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<Name> name,
- Label* miss,
- PrototypeCheckType check) {
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Handle<HeapType> type, Register object_reg, Handle<JSObject> holder,
+ Register holder_reg, Register scratch1, Register scratch2,
+ Handle<Name> name, Label* miss, PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure there's no overlap between holder and object registers.
@@ -881,7 +802,7 @@
}
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ jmp(&success);
@@ -892,7 +813,7 @@
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ jmp(&success);
@@ -903,15 +824,14 @@
}
-Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<Object> callback) {
+Register NamedLoadHandlerCompiler::CallbackFrontend(Handle<HeapType> type,
+                                                    Register object_reg,
+                                                    Handle<JSObject> holder,
+                                                    Handle<Name> name,
+                                                    Handle<Object> callback) {
Label miss;
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+ Register reg = FrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -957,24 +877,22 @@
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, &miss);
+ FrontendFooter(name, &miss);
return reg;
}
-void LoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- FieldIndex field,
- Representation representation) {
+void NamedLoadHandlerCompiler::GenerateLoadField(
+ Register reg, Handle<JSObject> holder, FieldIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
LoadFieldStub stub(isolate(), field);
GenerateTailCall(masm(), stub.GetCode());
}
-void LoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
  // Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
@@ -1018,18 +936,16 @@
}
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
__ ret(0);
}
-void LoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<Object> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(
+ Register holder_reg, Handle<Object> object,
+ Handle<JSObject> interceptor_holder, LookupResult* lookup,
Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1124,13 +1040,11 @@
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = HandlerFrontend(
- IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+ Register holder_reg =
+      Frontend(IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -1154,10 +1068,8 @@
#define __ ACCESS_MASM(masm)
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<HeapType> type,
- Register receiver,
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- esp[0] : return address
@@ -1201,9 +1113,8 @@
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> object,
- Handle<Name> name) {
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object, Handle<Name> name) {
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(this->name());
@@ -1220,7 +1131,7 @@
}
-void StoreStubCompiler::GenerateStoreArrayLength() {
+void NamedStoreHandlerCompiler::GenerateStoreArrayLength() {
// Prepare tail call to StoreIC_ArrayLength.
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -1234,9 +1145,8 @@
}
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
+Handle<Code> PropertyICCompiler::CompileIndexedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
@@ -1257,15 +1167,13 @@
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetICCode(
- kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
- Handle<Name> name) {
- NonexistentHandlerFrontend(type, last, name);
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
+ Handle<HeapType> type, Handle<JSObject> last, Handle<Name> name) {
+ NonexistentFrontend(type, last, name);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
@@ -1277,7 +1185,7 @@
}
-Register* LoadStubCompiler::registers() {
+Register* PropertyAccessCompiler::load_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3, scratch4.
Register receiver = LoadIC::ReceiverRegister();
Register name = LoadIC::NameRegister();
@@ -1286,21 +1194,7 @@
}
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-Register StoreStubCompiler::value() {
- return StoreIC::ValueRegister();
-}
-
-
-Register* StoreStubCompiler::registers() {
+Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreIC::ReceiverRegister();
Register name = StoreIC::NameRegister();
@@ -1309,7 +1203,7 @@
}
-Register* KeyedStoreStubCompiler::registers() {
+Register* PropertyAccessCompiler::keyed_store_calling_convention() {
// receiver, name, scratch1/map, scratch2, scratch3.
Register receiver = KeyedStoreIC::ReceiverRegister();
Register name = KeyedStoreIC::NameRegister();
@@ -1317,16 +1211,18 @@
static Register registers[] = { receiver, name, map, edi, no_reg };
return registers;
}
+
+
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
#undef __
#define __ ACCESS_MASM(masm)
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter) {
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1359,15 +1255,12 @@
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<HeapType> type,
- Handle<GlobalObject> global,
- Handle<PropertyCell> cell,
- Handle<Name> name,
- bool is_dont_delete) {
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<HeapType> type, Handle<GlobalObject> global,
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_dont_delete) {
Label miss;
- HandlerFrontendHeader(type, receiver(), global, name, &miss);
+ FrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
if (masm()->serializer_enabled()) {
__ mov(eax, Immediate(cell));
@@ -1390,19 +1283,18 @@
// The code above already loads the result into the return register.
__ ret(0);
- HandlerFrontendFooter(name, &miss);
+ FrontendFooter(name, &miss);
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
if (check == PROPERTY &&
@@ -1443,7 +1335,7 @@
// Return the generated code.
InlineCacheState state =
number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetICCode(kind(), type, name, state);
+ return GetCode(kind(), type, name, state);
}
@@ -1451,7 +1343,7 @@
#define __ ACCESS_MASM(masm)
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+void IndexedHandlerCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
=======================================
--- /trunk/test/cctest/test-constantpool.cc Wed Jun 4 00:06:13 2014 UTC
+++ /trunk/test/cctest/test-constantpool.cc Tue Jul 29 08:45:47 2014 UTC
@@ -31,7 +31,6 @@
TEST(ConstantPoolSmall) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
@@ -51,7 +50,7 @@
// Check getters and setters.
int64_t big_number = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
- Handle<Object> object = factory->NewHeapNumber(4.0);
+ Handle<Object> object = factory->NewHeapNumber(4.0, IMMUTABLE, TENURED);
Code* code = DummyCode(&context);
array->set(0, big_number);
array->set(1, 0.5);
@@ -67,21 +66,12 @@
CHECK_EQ(code, array->get_heap_ptr_entry(4));
CHECK_EQ(*object, array->get_heap_ptr_entry(5));
CHECK_EQ(50, array->get_int32_entry(6));
-
- // Check pointers are updated on GC.
- Object* old_ptr = array->get_heap_ptr_entry(5);
- CHECK_EQ(*object, old_ptr);
- heap->CollectGarbage(NEW_SPACE);
- Object* new_ptr = array->get_heap_ptr_entry(5);
- CHECK_NE(*object, old_ptr);
- CHECK_EQ(*object, new_ptr);
}
TEST(ConstantPoolExtended) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
@@ -116,12 +106,14 @@
  // Check small and large sections don't overlap.
int64_t small_section_int64 = V8_2PART_UINT64_C(0x56781234, DEF09ABC);
Code* small_section_code_ptr = DummyCode(&context);
- Handle<Object> small_section_heap_ptr = factory->NewHeapNumber(4.0);
+ Handle<Object> small_section_heap_ptr =
+ factory->NewHeapNumber(4.0, IMMUTABLE, TENURED);
int32_t small_section_int32 = 0xab12cd45;
int64_t extended_section_int64 = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
Code* extended_section_code_ptr = DummyCode(&context);
- Handle<Object> extended_section_heap_ptr = factory->NewHeapNumber(4.0);
+ Handle<Object> extended_section_heap_ptr =
+ factory->NewHeapNumber(5.0, IMMUTABLE, TENURED);
int32_t extended_section_int32 = 0xef67ab89;
for (int i = array->first_index(ConstantPoolArray::INT64, kSmall);
@@ -178,14 +170,6 @@
CHECK_EQ(extended_section_int32, array->get_int32_entry(i));
}
}
- // Check pointers are updated on GC in extended section.
- int index = array->first_index(ConstantPoolArray::HEAP_PTR, kExtended);
- Object* old_ptr = array->get_heap_ptr_entry(index);
- CHECK_EQ(*extended_section_heap_ptr, old_ptr);
- heap->CollectGarbage(NEW_SPACE);
- Object* new_ptr = array->get_heap_ptr_entry(index);
- CHECK_NE(*extended_section_heap_ptr, old_ptr);
- CHECK_EQ(*extended_section_heap_ptr, new_ptr);
}
@@ -242,3 +226,86 @@
int expected_int32_indexs[] = { 1, 2, 3, 4 };
CheckIterator(array, ConstantPoolArray::INT32, expected_int32_indexs, 4);
}
+
+
+TEST(ConstantPoolPreciseGC) {
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(context->GetIsolate());
+
+ ConstantPoolArray::NumberOfEntries small(1, 0, 0, 1);
+ Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(small);
+
+  // Check that the store buffer knows which entries are pointers and which are
+  // not. To do this, make non-pointer entries which look like new space
+  // pointers but are actually invalid and ensure the GC doesn't try to move
+  // them.
+  Handle<HeapObject> object = factory->NewHeapNumber(4.0);
+  Object* raw_ptr = *object;
+  // If interpreted as a pointer, this should be right inside the heap number
+  // which will cause a crash when trying to lookup the 'map' pointer.
+ int32_t invalid_ptr_int32 = static_cast<int32_t>(invalid_ptr);
+ int64_t invalid_ptr_int64 = static_cast<int64_t>(invalid_ptr);
+ array->set(0, invalid_ptr_int64);
+ array->set(1, invalid_ptr_int32);
+
+ // Ensure we perform a scan on scavenge for the constant pool's page.
+ MemoryChunk::FromAddress(array->address())->set_scan_on_scavenge(true);
+ heap->CollectGarbage(NEW_SPACE);
+
+ // Check the object was moved by GC.
+ CHECK_NE(*object, raw_ptr);
+
+ // Check the non-pointer entries weren't changed.
+ CHECK_EQ(invalid_ptr_int64, array->get_int64_entry(0));
+ CHECK_EQ(invalid_ptr_int32, array->get_int32_entry(1));
+}
+
+
+TEST(ConstantPoolCompacting) {
+ if (i::FLAG_never_compact) return;
+ i::FLAG_always_compact = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(context->GetIsolate());
+
+ ConstantPoolArray::NumberOfEntries small(0, 0, 1, 0);
+ ConstantPoolArray::NumberOfEntries extended(0, 0, 1, 0);
+ Handle<ConstantPoolArray> array =
+ factory->NewExtendedConstantPoolArray(small, extended);
+
+ // Start a second old-space page so that the heap pointer added to the
+  // constant pool array ends up on an evacuation candidate page.
+ Page* first_page = heap->old_data_space()->anchor()->next_page();
+ {
+ HandleScope scope(isolate);
+ Handle<HeapObject> temp =
+ factory->NewFixedDoubleArray(900 * KB / kDoubleSize, TENURED);
+ CHECK(heap->InOldDataSpace(temp->address()));
+ Handle<HeapObject> heap_ptr =
+ factory->NewHeapNumber(5.0, IMMUTABLE, TENURED);
+ CHECK(heap->InOldDataSpace(heap_ptr->address()));
+ CHECK(!first_page->Contains(heap_ptr->address()));
+ array->set(0, *heap_ptr);
+ array->set(1, *heap_ptr);
+ }
+
+ // Check heap pointers are correctly updated on GC.
+ Object* old_ptr = array->get_heap_ptr_entry(0);
+ Handle<Object> object(old_ptr, isolate);
+ CHECK_EQ(old_ptr, *object);
+ CHECK_EQ(old_ptr, array->get_heap_ptr_entry(1));
+
+ // Force compacting garbage collection.
+ CHECK(FLAG_always_compact);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_NE(old_ptr, *object);
+ CHECK_EQ(*object, array->get_heap_ptr_entry(0));
+ CHECK_EQ(*object, array->get_heap_ptr_entry(1));
+}
=======================================
--- /trunk/test/cctest/test-heap.cc Mon Jul 28 00:04:53 2014 UTC
+++ /trunk/test/cctest/test-heap.cc Tue Jul 29 08:45:47 2014 UTC
@@ -4439,6 +4439,58 @@
  // This scavenge will corrupt memory if the promotion queue is not evacuated.
heap->CollectGarbage(NEW_SPACE);
}
+
+
+TEST(Regress388880) {
+ i::FLAG_expose_gc = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<Map> map1 = Map::Create(isolate->object_function(), 1);
+ Handle<Map> map2 =
+ Map::CopyWithField(map1, factory->NewStringFromStaticAscii("foo"),
+                         HeapType::Any(isolate), NONE, Representation::Tagged(),
+ OMIT_TRANSITION).ToHandleChecked();
+
+ int desired_offset = Page::kPageSize - map1->instance_size();
+
+  // Allocate fixed array in old pointer space so that object allocated
+ // afterwards would end at the end of the page.
+ {
+ SimulateFullSpace(heap->old_pointer_space());
+ int padding_size = desired_offset - Page::kObjectStartOffset;
+ int padding_array_length =
+ (padding_size - FixedArray::kHeaderSize) / kPointerSize;
+
+ Handle<FixedArray> temp2 =
+ factory->NewFixedArray(padding_array_length, TENURED);
+ Page* page = Page::FromAddress(temp2->address());
+ CHECK_EQ(Page::kObjectStartOffset, page->Offset(temp2->address()));
+ }
+
+ Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED, false);
+ o->set_properties(*factory->empty_fixed_array());
+
+ // Ensure that the object allocated where we need it.
+ Page* page = Page::FromAddress(o->address());
+ CHECK_EQ(desired_offset, page->Offset(o->address()));
+
+ // Now we have an object right at the end of the page.
+
+  // Enable incremental marking to trigger actions in Heap::AdjustLiveBytes()
+ // that would cause crash.
+ IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+ marking->Abort();
+ marking->Start();
+ CHECK(marking->IsMarking());
+
+ // Now everything is set up for crashing in JSObject::MigrateFastToFast()
+ // when it calls heap->AdjustLiveBytes(...).
+ JSObject::MigrateToMap(o, map2);
+}
#ifdef DEBUG
=======================================
--- /trunk/test/fuzz-natives/base.js Thu May 8 00:04:50 2014 UTC
+++ /trunk/test/fuzz-natives/base.js Tue Jul 29 08:45:47 2014 UTC
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
// TODO(jkummerow): There are many ways to improve these tests, e.g.:
// - more variance in randomized inputs
// - better time complexity management
@@ -15,7 +17,9 @@
result.push(17);
result.push(-31);
result.push(new Array(100));
- result.push(new Array(100003));
+ var a = %NormalizeElements([]);
+ a.length = 100003;
+ result.push(a);
result.push(Number.MIN_VALUE);
result.push("whoops");
result.push("x");
=======================================
--- /trunk/test/mjsunit/allocation-site-info.js Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/test/mjsunit/allocation-site-info.js Tue Jul 29 08:45:47 2014 UTC
@@ -297,10 +297,6 @@
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_onearg(0, 5);
assertKind(elements_kind.fast_double, obj);
-// Now pass a length that forces the dictionary path.
-obj = newarraycase_onearg(100000, 5);
-assertKind(elements_kind.dictionary, obj);
-assertTrue(obj.length == 100000);
// Verify that cross context calls work
var realmA = Realm.current();
=======================================
--- /trunk/test/mjsunit/apply.js Thu Oct 27 07:38:48 2011 UTC
+++ /trunk/test/mjsunit/apply.js Tue Jul 29 08:45:47 2014 UTC
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
function f0() {
return this;
}
@@ -114,7 +116,8 @@
for (var j = 1; j < 0x40000000; j <<= 1) {
try {
- var a = new Array(j);
+ var a = %NormalizeElements([]);
+ a.length = j;
a[j - 1] = 42;
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
@@ -122,7 +125,8 @@
for (; j < 0x40000000; j <<= 1) {
var caught = false;
try {
- a = new Array(j);
+ a = %NormalizeElements([]);
+ a.length = j;
a[j - 1] = 42;
al.apply(345, a);
assertUnreachable("Apply of array with length " + a.length +
=======================================
--- /trunk/test/mjsunit/array-constructor-feedback.js Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/test/mjsunit/array-constructor-feedback.js Tue Jul 29 08:45:47 2014 UTC
@@ -130,8 +130,7 @@
a = bar(10);
assertKind(elements_kind.fast, a);
assertOptimized(bar);
- a = bar(100000);
- assertKind(elements_kind.dictionary, a);
+ bar(100000);
assertOptimized(bar);
// If the argument isn't a smi, things should still work.
=======================================
--- /trunk/test/mjsunit/array-feedback.js Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/test/mjsunit/array-feedback.js Tue Jul 29 08:45:47 2014 UTC
@@ -92,7 +92,7 @@
assertKind(elements_kind.fast, b);
a = create1(100000);
- assertKind(elements_kind.dictionary, a);
+ assertKind(elements_kind.fast_smi_only, a);
function create3(arg1, arg2, arg3) {
return Array(arg1, arg2, arg3);
=======================================
--- /trunk/test/mjsunit/elements-kind.js Mon Jul 21 00:04:41 2014 UTC
+++ /trunk/test/mjsunit/elements-kind.js Tue Jul 29 08:45:47 2014 UTC
@@ -145,7 +145,9 @@
}
assertKind(elements_kind.fast, you);
- assertKind(elements_kind.dictionary, new Array(0xDECAF));
+ var temp = [];
+ temp[0xDECAF] = 0;
+ assertKind(elements_kind.dictionary, temp);
var fast_double_array = new Array(0xDECAF);
for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
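
Since the test above no longer expects new Array(0xDECAF) to produce dictionary elements, it forces the slow kind with a single write far past the end of an empty array. The same idiom in isolation, with no natives syntax needed for the write itself (assertKind and elements_kind are helpers already defined for this test):

    var temp = [];
    temp[0xDECAF] = 0;                           // one write at index 912559 on a zero-length array
    assertKind(elements_kind.dictionary, temp);  // the sparse write switches it to slow elements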
=======================================
--- /trunk/test/mjsunit/es7/object-observe.js Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/test/mjsunit/es7/object-observe.js Tue Jul 29 08:45:47 2014 UTC
@@ -1234,8 +1234,9 @@
// Updating length on large (slow) array
reset();
-var slow_arr = new Array(1000000000);
+var slow_arr = %NormalizeElements([]);
slow_arr[500000000] = 'hello';
+slow_arr.length = 1000000000;
Object.observe(slow_arr, observer.callback);
var spliceRecords;
function slowSpliceCallback(records) {
=======================================
--- /trunk/test/mjsunit/mjsunit.status Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/test/mjsunit/mjsunit.status Tue Jul 29 08:45:47 2014 UTC
@@ -88,6 +88,7 @@
# Skip long running tests that time out in debug mode.
'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
'migrations': [SKIP],
+ 'array-functions-prototype-misc': [PASS, ['mode == debug', SKIP]],
##############################################################################
# This test sets the umask on a per-process basis and hence cannot be
=======================================
--- /trunk/test/mjsunit/polymorph-arrays.js Mon Jul 11 11:41:22 2011 UTC
+++ /trunk/test/mjsunit/polymorph-arrays.js Tue Jul 29 08:45:47 2014 UTC
@@ -37,7 +37,7 @@
a[i] = i;
}
a[5000000] = 256;
- assertTrue(%HasDictionaryElements(a));
+ return %NormalizeElements(a);
}
function testPolymorphicLoads() {
@@ -49,7 +49,7 @@
var object_array = new Object;
var sparse_object_array = new Object;
var js_array = new Array(10);
- var sparse_js_array = new Array(5000001);
+ var sparse_js_array = %NormalizeElements([]);
init_array(object_array);
init_array(js_array);
@@ -67,7 +67,7 @@
var object_array = new Object;
var sparse_object_array = new Object;
var js_array = new Array(10);
- var sparse_js_array = new Array(5000001);
+ var sparse_js_array = %NormalizeElements([]);
init_array(object_array);
init_array(js_array);
@@ -114,7 +114,8 @@
var object_array = new Object;
var sparse_object_array = new Object;
var js_array = new Array(10);
- var sparse_js_array = new Array(5000001);
+ var sparse_js_array = [];
+ sparse_js_array.length = 5000001;
init_array(object_array);
init_array(js_array);
@@ -132,7 +133,8 @@
var object_array = new Object;
var sparse_object_array = new Object;
var js_array = new Array(10);
- var sparse_js_array = new Array(5000001);
+ var sparse_js_array = %NormalizeElements([]);
+ sparse_js_array.length = 5000001;
init_array(object_array);
init_array(js_array);
=======================================
--- /trunk/test/mjsunit/regress/regress-2790.js Tue Nov 26 13:50:38 2013 UTC
+++ /trunk/test/mjsunit/regress/regress-2790.js Tue Jul 29 08:45:47 2014 UTC
@@ -26,6 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that we can create arrays of any size.
-for (var i = 1000; i < 1000000; i += 197) {
+for (var i = 1000; i < 1000000; i += 19703) {
new Array(i);
}
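
The larger step keeps the loop covering the 1000..1000000 range while allocating far fewer arrays. A rough back-of-the-envelope check of the iteration counts (iterations is a hypothetical helper, not part of the test):

    function iterations(step) { return Math.ceil((1000000 - 1000) / step); }
    print(iterations(197));    // ~5072 constructions with the old step
    print(iterations(19703));  // ~51 constructions with the new step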
=======================================
--- /trunk/tools/generate-runtime-tests.py Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/tools/generate-runtime-tests.py Tue Jul 29 08:45:47 2014 UTC
@@ -47,7 +47,7 @@
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
-EXPECTED_FUNCTION_COUNT = 420
+EXPECTED_FUNCTION_COUNT = 421
EXPECTED_FUZZABLE_COUNT = 335
EXPECTED_CCTEST_COUNT = 8
EXPECTED_UNKNOWN_COUNT = 4
@@ -124,6 +124,7 @@
# Arrays
"ArrayConstructor",
"InternalArrayConstructor",
+ "NormalizeElements",
# Literals
"MaterializeRegExpLiteral",
=======================================
--- /trunk/tools/push-to-trunk/releases.py Thu Jul 17 00:05:04 2014 UTC
+++ /trunk/tools/push-to-trunk/releases.py Tue Jul 29 08:45:47 2014 UTC
@@ -52,6 +52,11 @@
'|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)'
'([0-9]+)".*$', re.M)
+# Expression to pick tag and revision for bleeding edge tags. To be used with
+# output of 'svn log'.
+BLEEDING_EDGE_TAGS_RE = re.compile(
+ r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
+
def SortBranches(branches):
"""Sort branches with version number names."""
@@ -140,24 +145,14 @@
patches = "-%s" % patches
return patches
- def GetRelease(self, git_hash, branch):
- self.ReadAndPersistVersion()
- base_version = [self["major"], self["minor"], self["build"]]
- version = ".".join(base_version)
- body = self.GitLog(n=1, format="%B", git_hash=git_hash)
-
- patches = ""
- if self["patch"] != "0":
- version += ".%s" % self["patch"]
- patches = self.GetMergedPatches(body)
-
- title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+ def GetReleaseDict(
+      self, git_hash, bleeding_edge_rev, branch, version, patches, cl_body):
revision = self.GitSVNFindSVNRev(git_hash)
return {
# The SVN revision on the branch.
"revision": revision,
# The SVN revision on bleeding edge (only for newer trunk pushes).
- "bleeding_edge": self.GetBleedingEdgeFromPush(title),
+ "bleeding_edge": bleeding_edge_rev,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
@@ -172,14 +167,45 @@
"chromium_branch": "",
      # Link to the CL on code review. Trunk pushes are not uploaded, so this
# field will be populated below with the recent roll CL link.
- "review_link": MatchSafe(REVIEW_LINK_RE.search(body)),
+ "review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
% revision),
- }, self["patch"]
+ }
+
+ def GetRelease(self, git_hash, branch):
+ self.ReadAndPersistVersion()
+ base_version = [self["major"], self["minor"], self["build"]]
+ version = ".".join(base_version)
+ body = self.GitLog(n=1, format="%B", git_hash=git_hash)
+
+ patches = ""
+ if self["patch"] != "0":
+ version += ".%s" % self["patch"]
+ patches = self.GetMergedPatches(body)
+
+ title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+ return self.GetReleaseDict(
+ git_hash, self.GetBleedingEdgeFromPush(title), branch, version,
+ patches, body), self["patch"]
+
+ def GetReleasesFromBleedingEdge(self):
+    tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
+ releases = []
+ for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
+ git_hash = self.GitSVNFindGitHash(revision)
+
+ # Add bleeding edge release. It does not contain patches or a code
+ # review link, as tags are not uploaded.
+ releases.append(self.GetReleaseDict(
+ git_hash, revision, "bleeding_edge", tag, "", ""))
+ return releases
def GetReleasesFromBranch(self, branch):
self.GitReset("svn/%s" % branch)
+ if branch == 'bleeding_edge':
+ return self.GetReleasesFromBleedingEdge()
+
releases = []
try:
for git_hash in self.GitLog(format="%H").splitlines():
@@ -225,14 +251,16 @@
releases += self.GetReleasesFromBranch(stable)
releases += self.GetReleasesFromBranch(beta)
releases += self.GetReleasesFromBranch("trunk")
+ releases += self.GetReleasesFromBranch("bleeding_edge")
elif self._options.branch == 'all': # pragma: no cover
# Retrieve the full release history.
for branch in branches:
releases += self.GetReleasesFromBranch(branch)
releases += self.GetReleasesFromBranch("trunk")
+ releases += self.GetReleasesFromBranch("bleeding_edge")
else: # pragma: no cover
# Retrieve history for a specified branch.
- assert self._options.branch in branches + ["trunk"]
+ assert self._options.branch in branches + ["trunk", "bleeding_edge"]
releases += self.GetReleasesFromBranch(self._options.branch)
self["releases"] = sorted(releases,
=======================================
--- /trunk/tools/push-to-trunk/test_scripts.py Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/tools/push-to-trunk/test_scripts.py Tue Jul 29 08:45:47 2014 UTC
@@ -1167,6 +1167,33 @@
MergeToBranch(TEST_CONFIG, self).Run(args)
def testReleases(self):
+ tag_response_text = """
+------------------------------------------------------------------------
+r22631 | [email protected] | 2014-07-28 02:05:29 +0200 (Mon, 28 Jul 2014)
+Changed paths:
+ A /tags/3.28.43 (from /trunk:22630)
+
+Tagging version 3.28.43
+------------------------------------------------------------------------
+r22629 | [email protected] | 2014-07-26 05:09:29 +0200 (Sat, 26 Jul 2014)
+Changed paths:
+ A /tags/3.28.41 (from /branches/bleeding_edge:22626)
+
+Tagging version 3.28.41
+------------------------------------------------------------------------
+r22556 | [email protected] | 2014-07-23 13:31:59 +0200 (Wed, 23 Jul 2014)
+Changed paths:
+ A /tags/3.27.34.7 (from /branches/3.27:22555)
+
+Tagging version 3.27.34.7
+------------------------------------------------------------------------
+r22627 | [email protected] | 2014-07-26 01:39:15 +0200 (Sat, 26 Jul 2014)
+Changed paths:
+ A /tags/3.28.40 (from /branches/bleeding_edge:22624)
+
+Tagging version 3.28.40
+------------------------------------------------------------------------
+"""
json_output = self.MakeEmptyTempFile()
csv_output = self.MakeEmptyTempFile()
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
@@ -1230,6 +1257,15 @@
Git("log -1 --format=%ci hash6", ""),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(22, 5)),
+ Git("reset --hard svn/bleeding_edge", ""),
+ Git("log https://v8.googlecode.com/svn/tags -v --limit 20",
+ tag_response_text),
+ Git("svn find-rev r22626", "hash_22626"),
+ Git("svn find-rev hash_22626", "22626"),
+ Git("log -1 --format=%ci hash_22626", "01:23"),
+ Git("svn find-rev r22624", "hash_22624"),
+ Git("svn find-rev hash_22624", "22624"),
+ Git("log -1 --format=%ci hash_22624", "02:34"),
Git("status -s -uno", ""),
Git("checkout -f master", ""),
Git("pull", ""),
@@ -1260,12 +1296,22 @@
Releases(TEST_CONFIG, self).Run(args)
# Check expected output.
- csv = ("3.22.3,trunk,345,4567,\r\n"
+ csv = ("3.28.41,bleeding_edge,22626,,\r\n"
+ "3.28.40,bleeding_edge,22624,,\r\n"
+ "3.22.3,trunk,345,4567,\r\n"
"3.21.2,3.21,123,,\r\n"
"3.3.1.1,3.3,234,,12\r\n")
self.assertEquals(csv, FileToText(csv_output))
expected_json = [
+      {"bleeding_edge": "22626", "patches_merged": "", "version": "3.28.41",
+ "chromium_revision": "", "branch": "bleeding_edge", "revision": "22626",
+ "review_link": "", "date": "01:23", "chromium_branch": "",
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=22626"},
+      {"bleeding_edge": "22624", "patches_merged": "", "version": "3.28.40",
+ "chromium_revision": "", "branch": "bleeding_edge", "revision": "22624",
+ "review_link": "", "date": "02:34", "chromium_branch": "",
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=22624"},
{"bleeding_edge": "", "patches_merged": "", "version": "3.22.3",
"chromium_revision": "4567", "branch": "trunk", "revision": "345",
"review_link": "", "date": "", "chromium_branch": "7",
=======================================
--- /trunk/tools/whitespace.txt Fri Jul 25 00:05:16 2014 UTC
+++ /trunk/tools/whitespace.txt Tue Jul 29 08:45:47 2014 UTC
@@ -5,4 +5,4 @@
A Smi walks into a bar and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them and........
+The Smi looked at them and..........