Revision: 17250
Author: [email protected]
Date: Thu Oct 17 08:06:21 2013 UTC
Log: Version 3.22.14
Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=17250
Modified:
/trunk/ChangeLog
/trunk/src/arm/lithium-codegen-arm.cc
/trunk/src/builtins.cc
/trunk/src/compiler.cc
/trunk/src/flag-definitions.h
/trunk/src/handles.cc
/trunk/src/handles.h
/trunk/src/heap-profiler.h
/trunk/src/heap.cc
/trunk/src/hydrogen-dce.cc
/trunk/src/hydrogen-escape-analysis.cc
/trunk/src/hydrogen-instructions.cc
/trunk/src/hydrogen-instructions.h
/trunk/src/ia32/lithium-codegen-ia32.cc
/trunk/src/mark-compact.cc
/trunk/src/mips/lithium-codegen-mips.cc
/trunk/src/objects.cc
/trunk/src/objects.h
/trunk/src/optimizing-compiler-thread.cc
/trunk/src/optimizing-compiler-thread.h
/trunk/src/runtime.cc
/trunk/src/typing.cc
/trunk/src/version.cc
/trunk/src/x64/lithium-codegen-x64.cc
=======================================
--- /trunk/ChangeLog Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/ChangeLog Thu Oct 17 08:06:21 2013 UTC
@@ -1,3 +1,8 @@
+2013-10-17: Version 3.22.14
+
+ Performance and stability improvements on all platforms.
+
+
2013-10-16: Version 3.22.13
Do not look up ArrayBuffer on global object in typed array
constructor.
=======================================
--- /trunk/src/arm/lithium-codegen-arm.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/arm/lithium-codegen-arm.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2336,6 +2336,10 @@
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
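
The same four-line addition appears in the ia32, mips and x64 backends below, so all of the numeric-compare codegens now turn JavaScript's != and !== directly into a not-equal branch condition. A minimal sketch of the shared token-to-condition mapping; Tok and Cond are illustrative stand-ins, not V8's Token::Value or assembler condition types:

    enum class Tok { EQ, EQ_STRICT, NE, NE_STRICT, LT };
    enum class Cond { kEq, kNe, kBelow, kLess };

    Cond TokenToCondition(Tok op, bool is_unsigned) {
      switch (op) {
        case Tok::EQ:
        case Tok::EQ_STRICT:
          return Cond::kEq;
        case Tok::NE:         // newly handled: emits a direct ne/not_equal
        case Tok::NE_STRICT:  // branch instead of leaving cond unset
          return Cond::kNe;
        case Tok::LT:
          return is_unsigned ? Cond::kBelow : Cond::kLess;
      }
      return Cond::kEq;  // not reached for the tokens above
    }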
=======================================
--- /trunk/src/builtins.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/builtins.cc Thu Oct 17 08:06:21 2013 UTC
@@ -273,9 +273,12 @@
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(),
-size_delta);
}
- HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta,
- elms->Size()));
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_profiling()) {
+ profiler->ObjectMoveEvent(elms->address(),
+ elms->address() + size_delta,
+ elms->Size());
+ }
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
}
=======================================
--- /trunk/src/compiler.cc Thu Sep 26 07:36:30 2013 UTC
+++ /trunk/src/compiler.cc Thu Oct 17 08:06:21 2013 UTC
@@ -553,6 +553,33 @@
return LiveEditFunctionTracker::IsActive(info->isolate()) ||
(info->isolate()->DebuggerHasBreakPoints()
&& !allow_lazy_without_ctx);
}
+
+
+// Sets the expected number of properties based on estimate from compiler.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          int estimate) {
+ // See the comment in SetExpectedNofProperties.
+ if (shared->live_objects_may_exist()) return;
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // TODO(yangguo): check whether those heuristics are still up-to-date.
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (Serializer::enabled()) {
+ estimate += 2;
+ } else if (FLAG_clever_optimizations) {
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ estimate += 8;
+ } else {
+ estimate += 3;
+ }
+
+ shared->set_expected_nof_properties(estimate);
+}
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
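
The helper above consolidates what used to be two functions in handles.cc (deleted below). A worked example of the heuristic, assuming the defaults (no snapshot serialization, --clever_optimizations on): a constructor that adds no properties starts with estimate 0, is bumped to 2, then padded to 10 expected in-object properties; inobject slack tracking later reclaims whatever goes unused. A compact restatement (illustrative code, not V8's):

    int AdjustedEstimate(int estimate, bool serializing, bool clever) {
      if (estimate == 0) estimate = 2;       // properties likely added later
      if (serializing) return estimate + 2;  // snapshot objects never shrink
      return estimate + (clever ? 8 : 3);    // slack tracking reclaims excess
    }
    // AdjustedEstimate(0, false, true) == 10
    // AdjustedEstimate(5, true,  true) == 7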
=======================================
--- /trunk/src/flag-definitions.h Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/flag-definitions.h Thu Oct 17 08:06:21 2013 UTC
@@ -313,6 +313,8 @@
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+DEFINE_int(escape_analysis_iterations, 1,
+ "maximum number of escape analysis fix-point iterations")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
=======================================
--- /trunk/src/handles.cc Fri Oct 4 15:38:52 2013 UTC
+++ /trunk/src/handles.cc Thu Oct 17 08:06:21 2013 UTC
@@ -148,54 +148,6 @@
      constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
JSGlobalProxy);
}
-
-
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
-  // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
-                                          int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
void FlattenString(Handle<String> string) {
=======================================
--- /trunk/src/handles.h Fri Oct 4 15:38:52 2013 UTC
+++ /trunk/src/handles.h Thu Oct 17 08:06:21 2013 UTC
@@ -299,14 +299,6 @@
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
-                                          int estimate) {
-
-
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
=======================================
--- /trunk/src/heap-profiler.h Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/heap-profiler.h Thu Oct 17 08:06:21 2013 UTC
@@ -37,14 +37,6 @@
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(heap, call)                                              \
-  do {                                                                        \
-    v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler();  \
-    if (profiler != NULL && profiler->is_profiling()) {                       \
-      profiler->call;                                                         \
-    }                                                                         \
-  } while (false)
-
class HeapProfiler {
public:
explicit HeapProfiler(Heap* heap);
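
With the HEAP_PROFILE macro deleted, each former call site (builtins.cc above, heap.cc and mark-compact.cc below) open-codes the guard. One nuance: the macro also tolerated a NULL profiler, while the rewritten call sites only test is_profiling(), relying on the isolate always owning a HeapProfiler. The shared replacement pattern, roughly (from, to and size stand for each caller's arguments):

    HeapProfiler* profiler = heap->isolate()->heap_profiler();
    if (profiler->is_profiling()) {
      profiler->ObjectMoveEvent(from, to, size);
    }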
=======================================
--- /trunk/src/heap.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/heap.cc Thu Oct 17 08:06:21 2013 UTC
@@ -450,6 +450,10 @@
#endif // DEBUG
store_buffer()->GCPrologue();
+
+ if (FLAG_concurrent_osr) {
+ isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+ }
}
@@ -2130,9 +2134,12 @@
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap,
-               ObjectMoveEvent(source->address(), target->address(), size));
Isolate* isolate = heap->isolate();
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+      heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                     size);
+ }
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
=======================================
--- /trunk/src/hydrogen-dce.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/hydrogen-dce.cc Thu Oct 17 08:06:21 2013 UTC
@@ -97,10 +97,12 @@
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (!instr->CheckFlag(HValue::kIsLive)) {
-      // Instruction has not been marked live; assume it is dead and remove.
-      // TODO(titzer): we don't remove constants because some special ones
- // might be used by later phases and are assumed to be in the graph
- if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+ // Instruction has not been marked live, so remove it.
+ if (!instr->IsConstant() || instr->block()->block_id() != 0) {
+ // TODO(titzer): Some global constants in block 0 can be used
+ // again later, and can't currently be removed. Fix that.
+ instr->DeleteAndReplaceWith(NULL);
+ }
} else {
      // Clear the liveness flag to leave the graph clean for the next DCE.
instr->ClearFlag(HValue::kIsLive);
=======================================
--- /trunk/src/hydrogen-escape-analysis.cc Mon Sep 23 14:09:36 2013 UTC
+++ /trunk/src/hydrogen-escape-analysis.cc Thu Oct 17 08:06:21 2013 UTC
@@ -306,7 +306,7 @@
number_of_objects_++;
block_states_.Clear();
- // Perform actual analysis steps.
+ // Perform actual analysis step.
AnalyzeDataFlow(allocate);
cumulative_values_ += number_of_values_;
@@ -320,8 +320,13 @@
  // TODO(mstarzinger): We disable escape analysis with OSR for now, because
// spill slots might be uninitialized. Needs investigation.
if (graph()->has_osr()) return;
- CollectCapturedValues();
- PerformScalarReplacement();
+ int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+ for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+ CollectCapturedValues();
+ if (captured_.is_empty()) break;
+ PerformScalarReplacement();
+ captured_.Clear();
+ }
}
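
Collecting captured values and performing scalar replacement now repeats until no captured allocations remain or the budget set by the new --escape_analysis_iterations flag runs out; with the default of 1 the behavior is unchanged, but raising the flag lets one round of replacement expose further non-escaping allocations. The general shape of such a capped fix-point loop, sketched with a stand-in Analysis type:

    template <typename Analysis>
    void RunToFixpoint(Analysis* analysis, int max_iterations) {
      for (int i = 0; i < max_iterations; i++) {
        analysis->CollectCandidates();           // find captured objects
        if (analysis->NothingCaptured()) break;  // fix-point reached
        analysis->PerformReplacement();          // may expose new candidates
        analysis->ClearCandidates();
      }
    }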
=======================================
--- /trunk/src/hydrogen-instructions.cc Fri Oct 11 10:35:37 2013 UTC
+++ /trunk/src/hydrogen-instructions.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2394,6 +2394,12 @@
env = env->outer();
}
}
+
+
+void HCapturedObject::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d ", capture_id());
+ HDematerializedObject::PrintDataTo(stream);
+}
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
=======================================
--- /trunk/src/hydrogen-instructions.h Fri Oct 11 10:35:37 2013 UTC
+++ /trunk/src/hydrogen-instructions.h Thu Oct 17 08:06:21 2013 UTC
@@ -3317,6 +3317,8 @@
// Replay effects of this instruction on the given environment.
void ReplayEnvironment(HEnvironment* env);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
private:
=======================================
--- /trunk/src/ia32/lithium-codegen-ia32.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/ia32/lithium-codegen-ia32.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2503,6 +2503,10 @@
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
=======================================
--- /trunk/src/mark-compact.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/mark-compact.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2759,7 +2759,10 @@
Address src,
int size,
AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
+ HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(src, dst, size);
+ }
ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
=======================================
--- /trunk/src/mips/lithium-codegen-mips.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/mips/lithium-codegen-mips.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2178,6 +2178,10 @@
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
=======================================
--- /trunk/src/objects.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/objects.cc Thu Oct 17 08:06:21 2013 UTC
@@ -343,9 +343,10 @@
}
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name) {
+Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -353,66 +354,71 @@
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
-    MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_HEAP_FUNCTION(isolate,
+                       (callback->getter)(isolate, *receiver, callback->data),
+ Object);
}
// api style callbacks.
if (structure->IsAccessorInfo()) {
- if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+    Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+ if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+ Handle<Object> args[2] = { name, receiver };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>::null();
}
    // TODO(rossberg): Handling symbols in the API requires changing the API,
// so we do not support it for now.
- if (name->IsSymbol()) return isolate->heap()->undefined_value();
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(receiver,
-                                         DeclaredAccessorInfo::cast(structure),
- isolate);
+ CALL_HEAP_FUNCTION(
+ isolate,
+ GetDeclaredAccessorProperty(*receiver,
+                                      DeclaredAccessorInfo::cast(*structure),
+ isolate),
+ Object);
}
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- Object* fun_obj = data->getter();
+
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
v8::AccessorGetterCallback call_fun =
- v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
+ v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+ if (call_fun == NULL) return isolate->factory()->undefined_value();
+
HandleScope scope(isolate);
- JSObject* self = JSObject::cast(receiver);
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- PropertyCallbackArguments args(isolate, data->data(), self, this);
+ Handle<JSObject> self = Handle<JSObject>::cast(receiver);
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *self, *object);
v8::Handle<v8::Value> result =
args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- Object* return_value = *v8::Utils::OpenHandle(*result);
+ Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
- return return_value;
+ return scope.CloseAndEscape(return_value);
}
// __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
+ Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+ isolate);
+ if (getter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ CALL_HEAP_FUNCTION(
+ isolate,
+ object->GetPropertyWithDefinedGetter(*receiver,
+ JSReceiver::cast(*getter)),
+ Object);
}
-
- UNREACHABLE();
- return NULL;
+ // Getter is not a function.
+ return isolate->factory()->undefined_value();
}
@@ -505,19 +511,6 @@
if (has_pending_exception) return Failure::Exception();
return *result;
}
-
-
-// TODO(yangguo): this should eventually replace the non-handlified
version.
-Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Object> structure,
- Handle<Name> name) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->GetPropertyWithCallback(*receiver,
- *structure,
- *name),
- Object);
-}
// Only deal with CALLBACKS and INTERCEPTOR
@@ -903,9 +896,16 @@
}
case CONSTANT:
return result->GetConstant();
- case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
+ case CALLBACKS: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(result->GetCallbackObject(), isolate),
+ handle(name, isolate));
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case HANDLER:
return result->proxy()->GetPropertyWithHandler(receiver, name);
case INTERCEPTOR: {
@@ -9381,6 +9381,7 @@
if (number_of_own_descriptors > 0) {
    TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
    ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ set_owns_descriptors(true);
} else {
ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
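
The bulk of this file's change handlifies GetPropertyWithCallback: raw Object* parameters become Handle<> parameters, the result comes back as a Handle<Object>, and a null handle signals a pending exception. This matters because accessor callbacks can allocate and trigger GC, which would invalidate raw pointers held across the call. Callers follow the pattern visible in the CALLBACKS case above, roughly:

    HandleScope scope(isolate);
    Handle<Object> value = JSObject::GetPropertyWithCallback(
        handle(result->holder(), isolate),
        handle(receiver, isolate),
        handle(result->GetCallbackObject(), isolate),
        handle(name, isolate));
    RETURN_IF_EMPTY_HANDLE(isolate, value);  // null handle: exception pending
    return *value;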
=======================================
--- /trunk/src/objects.h Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/objects.h Thu Oct 17 08:06:21 2013 UTC
@@ -2127,10 +2127,6 @@
Handle<Object> structure,
Handle<Name> name);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name);
-
static Handle<Object> SetPropertyWithCallback(
Handle<JSObject> object,
Handle<Object> structure,
=======================================
--- /trunk/src/optimizing-compiler-thread.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/optimizing-compiler-thread.cc Thu Oct 17 08:06:21 2013 UTC
@@ -37,6 +37,19 @@
namespace v8 {
namespace internal {
+OptimizingCompilerThread::~OptimizingCompilerThread() {
+ ASSERT_EQ(0, input_queue_length_);
+ DeleteArray(input_queue_);
+ if (FLAG_concurrent_osr) {
+#ifdef DEBUG
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ CHECK_EQ(NULL, osr_buffer_[i]);
+ }
+#endif
+ DeleteArray(osr_buffer_);
+ }
+}
+
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
@@ -91,14 +104,22 @@
}
}
}
+
+
+RecompileJob* OptimizingCompilerThread::NextInput() {
+ LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+ if (input_queue_length_ == 0) return NULL;
+ RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ ASSERT_NE(NULL, job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ return job;
+}
void OptimizingCompilerThread::CompileNext() {
- RecompileJob* job = NULL;
- bool result = input_queue_.Dequeue(&job);
- USE(result);
- ASSERT(result);
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+ RecompileJob* job = NextInput();
+ ASSERT_NE(NULL, job);
// The function may have already been optimized by OSR. Simply continue.
RecompileJob::Status status = job->OptimizeGraph();
@@ -131,7 +152,7 @@
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
RecompileJob* job;
- while (input_queue_.Dequeue(&job)) {
+ while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
@@ -140,7 +161,6 @@
DisposeRecompileJob(job, restore_function_code);
}
}
- Release_Store(&queue_length_, static_cast<AtomicWord>(0));
}
@@ -156,12 +176,12 @@
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
- RecompileJob* job;
- for (int i = 0; i < osr_buffer_size_; i++) {
- job = osr_buffer_[i];
- if (job != NULL) DisposeRecompileJob(job, restore_function_code);
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ if (osr_buffer_[i] != NULL) {
+ DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ osr_buffer_[i] = NULL;
+ }
}
- osr_cursor_ = 0;
}
@@ -187,10 +207,9 @@
stop_semaphore_.Wait();
if (FLAG_concurrent_recompilation_delay != 0) {
- // Barrier when loading queue length is not necessary since the write
- // happens in CompileNext on the same thread.
- // This is used only for testing.
- while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+    // At this point the optimizing compiler thread's event loop has stopped.
+ // There is no need for a mutex when reading input_queue_length_.
+ while (input_queue_length_ > 0) CompileNext();
InstallOptimizedFunctions();
} else {
FlushInputQueue(false);
@@ -239,7 +258,6 @@
void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
CompilationInfo* info = job->info();
if (info->is_osr()) {
if (FLAG_trace_concurrent_recompilation) {
@@ -247,13 +265,24 @@
info->closure()->PrintName();
PrintF(" for concurrent on-stack replacement.\n");
}
- AddToOsrBuffer(job);
osr_attempts_++;
BackEdgeTable::AddStackCheck(info);
+ AddToOsrBuffer(job);
+ // Add job to the front of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ // Move shift_ back by one.
+ input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
+ input_queue_[InputQueueIndex(0)] = job;
+ input_queue_length_++;
} else {
info->closure()->MarkInRecompileQueue();
+ // Add job to the back of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ input_queue_[InputQueueIndex(input_queue_length_)] = job;
+ input_queue_length_++;
}
- input_queue_.Enqueue(job);
if (FLAG_block_concurrent_recompilation) {
blocked_jobs_++;
} else {
@@ -274,15 +303,14 @@
RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
Handle<JSFunction> function, uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- RecompileJob* result = NULL;
- for (int i = 0; i < osr_buffer_size_; i++) {
- result = osr_buffer_[i];
- if (result == NULL) continue;
- if (result->IsWaitingForInstall() &&
- result->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->IsWaitingForInstall() &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
osr_hits_++;
osr_buffer_[i] = NULL;
- return result;
+ return current;
}
}
return NULL;
@@ -292,10 +320,11 @@
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- for (int i = 0; i < osr_buffer_size_; i++) {
- if (osr_buffer_[i] != NULL &&
- osr_buffer_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
- return !osr_buffer_[i]->IsWaitingForInstall();
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return !current->IsWaitingForInstall();
}
}
return false;
@@ -304,10 +333,10 @@
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
- for (int i = 0; i < osr_buffer_size_; i++) {
- if (osr_buffer_[i] != NULL &&
- *osr_buffer_[i]->info()->closure() == function) {
- return !osr_buffer_[i]->IsWaitingForInstall();
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL && *current->info()->closure() == function) {
+ return !current->IsWaitingForInstall();
}
}
return false;
@@ -316,27 +345,27 @@
void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
ASSERT(!IsOptimizerThread());
-  // Store into next empty slot or replace next stale OSR job that's waiting
- // in vain. Dispose in the latter case.
- RecompileJob* stale;
+ // Find the next slot that is empty or has a stale job.
while (true) {
- stale = osr_buffer_[osr_cursor_];
- if (stale == NULL) break;
- if (stale->IsWaitingForInstall()) {
- CompilationInfo* info = stale->info();
- if (FLAG_trace_osr) {
- PrintF("[COSR - Discarded ");
- info->closure()->PrintName();
- PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
- }
- DisposeRecompileJob(stale, false);
- break;
+ RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
+ if (stale == NULL || stale->IsWaitingForInstall()) break;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
+ }
+
+ // Add to found slot and dispose the evicted job.
+ RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
+ if (evicted != NULL) {
+ ASSERT(evicted->IsWaitingForInstall());
+ CompilationInfo* info = evicted->info();
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - Discarded ");
+ info->closure()->PrintName();
+ PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
- AdvanceOsrCursor();
+ DisposeRecompileJob(evicted, false);
}
-
- osr_buffer_[osr_cursor_] = job;
- AdvanceOsrCursor();
+ osr_buffer_[osr_buffer_cursor_] = job;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
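
The lock-free UnboundQueue plus atomically maintained queue_length_ gives way to a fixed-capacity circular buffer guarded by a plain mutex: InputQueueIndex(i) maps a logical position to a physical slot, ordinary jobs are appended at logical position input_queue_length_, and OSR jobs jump the queue by moving input_queue_shift_ back one slot. A self-contained sketch of the same ring arithmetic (stand-in class with int payloads instead of RecompileJob*):

    #include <cassert>

    class RingQueue {  // sketch of the input_queue_shift_/length_ scheme
     public:
      explicit RingQueue(int capacity)
          : capacity_(capacity), length_(0), shift_(0),
            slots_(new int[capacity]) {}
      ~RingQueue() { delete[] slots_; }

      int Index(int i) const { return (i + shift_) % capacity_; }

      void PushBack(int job) {   // ordinary compile job: appended at the end
        assert(length_ < capacity_);
        slots_[Index(length_)] = job;
        length_++;
      }

      void PushFront(int job) {  // OSR job: served first, so prepended
        assert(length_ < capacity_);
        shift_ = Index(capacity_ - 1);  // shift_ - 1, wrapping around
        slots_[Index(0)] = job;
        length_++;
      }

      int PopFront() {           // what NextInput() does under the mutex
        assert(length_ > 0);
        int job = slots_[Index(0)];
        shift_ = Index(1);
        length_--;
        return job;
      }

     private:
      int capacity_, length_, shift_;
      int* slots_;
    };

Note also that the rewritten AddToOsrBuffer doubles as the aging mechanism: AgeBufferedOsrJobs, called from the GC prologue in heap.cc above, passes a NULL job, which advances the cursor past, and disposes, at most one stale OSR job per GC.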
=======================================
--- /trunk/src/optimizing-compiler-thread.h Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/optimizing-compiler-thread.h Thu Oct 17 08:06:21 2013 UTC
@@ -53,22 +53,24 @@
isolate_(isolate),
stop_semaphore_(0),
input_queue_semaphore_(0),
- osr_cursor_(0),
+ input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+ input_queue_length_(0),
+ input_queue_shift_(0),
+ osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+ osr_buffer_cursor_(0),
osr_hits_(0),
osr_attempts_(0),
blocked_jobs_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
if (FLAG_concurrent_osr) {
- osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4;
- osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_);
- for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL;
+ // Allocate and mark OSR buffer slots as empty.
+ osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
}
}
- ~OptimizingCompilerThread() {
- if (FLAG_concurrent_osr) DeleteArray(osr_buffer_);
- }
+ ~OptimizingCompilerThread();
void Run();
void Stop();
@@ -83,17 +85,15 @@
bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
- // We don't need a barrier since we have a data dependency right
- // after.
- Atomic32 current_length = NoBarrier_Load(&queue_length_);
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ return input_queue_length_ < input_queue_capacity_;
+ }
- // This can be queried only from the execution thread.
- ASSERT(!IsOptimizerThread());
- // Since only the execution thread increments queue_length_ and
- // only one thread can run inside an Isolate at one time, a direct
- // doesn't introduce a race -- queue_length_ may decreased in
- // meantime, but not increased.
- return (current_length < FLAG_concurrent_recompilation_queue_length);
+ inline void AgeBufferedOsrJobs() {
+    // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+ // Dispose said OSR job in the latter case. Calling this on every GC
+ // should make sure that we do not hold onto stale jobs indefinitely.
+ AddToOsrBuffer(NULL);
}
#ifdef DEBUG
@@ -107,12 +107,17 @@
void FlushOutputQueue(bool restore_function_code);
void FlushOsrBuffer(bool restore_function_code);
void CompileNext();
+ RecompileJob* NextInput();
  // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
// Tasks evicted from the cyclic buffer are discarded.
void AddToOsrBuffer(RecompileJob* compiler);
- void AdvanceOsrCursor() {
- osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_;
+
+ inline int InputQueueIndex(int i) {
+ int result = (i + input_queue_shift_) % input_queue_capacity_;
+ ASSERT_LE(0, result);
+ ASSERT_LT(result, input_queue_capacity_);
+ return result;
}
#ifdef DEBUG
@@ -124,20 +129,22 @@
Semaphore stop_semaphore_;
Semaphore input_queue_semaphore_;
- // Queue of incoming recompilation tasks (including OSR).
- UnboundQueue<RecompileJob*> input_queue_;
+ // Circular queue of incoming recompilation tasks (including OSR).
+ RecompileJob** input_queue_;
+ int input_queue_capacity_;
+ int input_queue_length_;
+ int input_queue_shift_;
+ Mutex input_queue_mutex_;
+
// Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<RecompileJob*> output_queue_;
+
// Cyclic buffer of recompilation tasks for OSR.
- // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to
- // a lot of memory. Fix this.
RecompileJob** osr_buffer_;
- // Cursor for the cyclic buffer.
- int osr_cursor_;
- int osr_buffer_size_;
+ int osr_buffer_capacity_;
+ int osr_buffer_cursor_;
volatile AtomicWord stop_thread_;
- volatile Atomic32 queue_length_;
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;
=======================================
--- /trunk/src/runtime.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/runtime.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2977,10 +2977,24 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
+ // If objects constructed from this function exist then changing
+ // 'estimated_nof_properties' is dangerous since the previous value might
+  // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (!func->shared()->live_objects_may_exist()) {
+ func->shared()->set_expected_nof_properties(num);
+ if (func->has_initial_map()) {
+ Handle<Map> new_initial_map =
+ func->GetIsolate()->factory()->CopyMap(
+ Handle<Map>(func->initial_map()));
+ new_initial_map->set_unused_property_fields(num);
+ func->set_initial_map(*new_initial_map);
+ }
+ }
return isolate->heap()->undefined_value();
}
@@ -10720,19 +10734,20 @@
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
-          MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
+ Isolate* isolate = heap->isolate();
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(structure, isolate),
+ handle(name, isolate));
+ if (value.is_null()) {
+ MaybeObject* exception = heap->isolate()->pending_exception();
heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
+ if (caught_exception != NULL) *caught_exception = true;
+ return exception;
}
- return value;
+ return *value;
} else {
return heap->undefined_value();
}
=======================================
--- /trunk/src/typing.cc Tue Oct 15 08:25:05 2013 UTC
+++ /trunk/src/typing.cc Thu Oct 17 08:06:21 2013 UTC
@@ -603,8 +603,10 @@
case Token::SHR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- // TODO(rossberg): we could use an UnsignedSmi as lower bound here...
- NarrowType(expr, Bounds(Type::Unsigned32(), isolate_));
+      // TODO(rossberg): The upper bound would be Unsigned32, but since there
+ // is no 'positive Smi' type for the lower bound, we use the smallest
+ // union of Smi and Unsigned32 as upper bound instead.
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
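
Why the bounds widen: x >>> y yields values in [0, 2^32), which exceeds the 31-bit signed Smi payload used on 32-bit targets, yet small results should still be typeable as Smi; lacking a 'positive Smi' type in the lattice, the bounds become [Smi, Number]. A tiny numeric illustration of the overflow (standalone, outside V8):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // JavaScript: (-1 >>> 0) === 4294967295, i.e. all 32 bits set.
      uint32_t shifted = static_cast<uint32_t>(-1) >> 0;
      int32_t smi_max = (1 << 30) - 1;  // largest Smi on 32-bit V8
      std::printf("%u > %d\n", shifted, smi_max);  // 4294967295 > 1073741823
      return 0;
    }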
=======================================
--- /trunk/src/version.cc Wed Oct 16 15:34:25 2013 UTC
+++ /trunk/src/version.cc Thu Oct 17 08:06:21 2013 UTC
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 22
-#define BUILD_NUMBER 13
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 14
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
=======================================
--- /trunk/src/x64/lithium-codegen-x64.cc Wed Oct 16 09:00:56 2013 UTC
+++ /trunk/src/x64/lithium-codegen-x64.cc Thu Oct 17 08:06:21 2013 UTC
@@ -2048,6 +2048,10 @@
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;