Revision: 17742
Author: [email protected]
Date: Thu Nov 14 12:13:26 2013 UTC
Log: This is an exact copy of r17365, which was reverted in r17488 due
to memory corruption. The root cause of the memory corruption — a missing
check of the bump-pointer limit before storing the filler map — was addressed
in r17626, where allocation hooks were removed from the generated code and left
only in the runtime.
This is the initial implementation of the allocation profiler.
Whenever a new object allocation is reported to the HeapProfiler and
allocation tracking is on, we capture the current stack trace, add it to
the collection of allocation traces (a tree) and attribute the
allocated size to the top JS function on the stack.
Format of serialized heap snapshot is extended to include information about
recorded allocation stack traces.
This patch is r17301 plus a fix for the test crash in debug mode. The test
crashed because we were traversing stack trace when just allocated object
wasn't completely configured, in particular the map pointer was incorrect.
Invalid Map pointer broke heap iteration required to find Code object for a
given pc during stack traversal. The solution is to insert free space
filler in the newly allocated block just before collecting stack trace.
BUG=chromium:277984,v8:2949
[email protected]
Review URL: https://codereview.chromium.org/61893031
http://code.google.com/p/v8/source/detail?r=17742
Added:
/branches/bleeding_edge/src/allocation-tracker.cc
/branches/bleeding_edge/src/allocation-tracker.h
Modified:
/branches/bleeding_edge/src/heap-snapshot-generator.cc
/branches/bleeding_edge/src/heap-snapshot-generator.h
/branches/bleeding_edge/test/cctest/test-heap-profiler.cc
/branches/bleeding_edge/tools/gyp/v8.gyp
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/allocation-tracker.cc Thu Nov 14 12:13:26
2013 UTC
@@ -0,0 +1,279 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "allocation-tracker.h"
+
+#include "heap-snapshot-generator.h"
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
// Creates a trace node attributed to the function identified by the
// snapshot id of its SharedFunctionInfo. The node id is drawn from the
// owning tree's counter, so it is unique within one tree.
AllocationTraceNode::AllocationTraceNode(
    AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
    : tree_(tree),
      function_id_(shared_function_info_id),
      total_size_(0),
      allocation_count_(0),
      id_(tree->next_node_id()) {
}
+
+
+AllocationTraceNode::~AllocationTraceNode() {
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+ for (int i = 0; i < children_.length(); i++) {
+ AllocationTraceNode* node = children_[i];
+ if (node->function_id() == id) return node;
+ }
+ return NULL;
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId
id) {
+ AllocationTraceNode* child = FindChild(id);
+ if (child == NULL) {
+ child = new AllocationTraceNode(tree_, id);
+ children_.Add(child);
+ }
+ return child;
+}
+
+
+void AllocationTraceNode::AddAllocation(unsigned size) {
+ total_size_ += size;
+ ++allocation_count_;
+}
+
+
// Prints this node and, recursively, its children with increasing
// indent. If |tracker| is given, function ids are resolved to function
// names; otherwise the raw function id is printed.
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
  OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
  if (tracker != NULL) {
    const char* name = "<unknown function>";
    if (function_id_ != 0) {  // 0 is the root's placeholder function id.
      AllocationTracker::FunctionInfo* info =
          tracker->GetFunctionInfo(function_id_);
      if (info != NULL) {
        name = info->name;
      }
    }
    OS::Print("%s #%u", name, id_);
  } else {
    OS::Print("%u #%u", function_id_, id_);
  }
  OS::Print("\n");
  indent += 2;
  for (int i = 0; i < children_.length(); i++) {
    children_[i]->Print(indent, tracker);
  }
}
+
+
// Node ids start at 1; the root node consumes the first id and uses
// function id 0, which stands for "no function".
AllocationTraceTree::AllocationTraceTree()
    : next_node_id_(1),
      root_(this, 0) {
}
+
+
// root_ is held by value, so its destructor runs automatically here.
AllocationTraceTree::~AllocationTraceTree() {
}
+
+
+AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
+ const Vector<SnapshotObjectId>& path) {
+ AllocationTraceNode* node = root();
+ for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ entry != path.start() - 1;
+ --entry) {
+ node = node->FindOrAddChild(*entry);
+ }
+ return node;
+}
+
+
// Dumps the whole trace tree to stdout for debugging.
void AllocationTraceTree::Print(AllocationTracker* tracker) {
  OS::Print("[AllocationTraceTree:]\n");
  OS::Print("Total size | Allocation count | Function id | id\n");
  root()->Print(0, tracker);
}
+
// List<>::Iterate callback used by the destructor to free the
// remaining unresolved locations.
void AllocationTracker::DeleteUnresolvedLocation(
    UnresolvedLocation** location) {
  delete *location;
}
+
+
// All fields default to "unknown": empty names, script id 0, and -1
// for line/column, which stay -1 until the position is resolved at
// serialization time.
AllocationTracker::FunctionInfo::FunctionInfo()
    : name(""),
      script_name(""),
      script_id(0),
      line(-1),
      column(-1) {
}
+
+
// HashMap match function. Keys are SnapshotObjectIds cast to void*, so
// plain pointer equality is the correct comparison.
static bool AddressesMatch(void* key1, void* key2) {
  return key1 == key2;
}
+
+
// |ids| and |names| are borrowed from the owning snapshots collection
// and must outlive this tracker.
AllocationTracker::AllocationTracker(
    HeapObjectsMap* ids, StringsStorage* names)
    : ids_(ids),
      names_(names),
      id_to_function_info_(AddressesMatch) {
}
+
+
+AllocationTracker::~AllocationTracker() {
+ unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+}
+
+
// Resolves the source position of every recorded function. Resolving
// may allocate on the JS heap (see UnresolvedLocation::Resolve), which
// can re-enter this tracker via NewObjectEvent/AddFunctionInfo and
// append new entries to unresolved_locations_; the list is therefore
// detached into a local copy before iterating.
void AllocationTracker::PrepareForSerialization() {
  List<UnresolvedLocation*> copy(unresolved_locations_.length());
  copy.AddAll(unresolved_locations_);
  unresolved_locations_.Clear();
  for (int i = 0; i < copy.length(); i++) {
    copy[i]->Resolve();
    delete copy[i];
  }
}
+
+
// Records one allocation of |size| bytes at |addr|: captures the
// current JS stack (up to kMaxAllocationTraceLength frames), adds the
// path to the trace tree and charges the size to the top JS function.
void AllocationTracker::NewObjectEvent(Address addr, int size) {
  DisallowHeapAllocation no_allocation;
  Heap* heap = ids_->heap();

  // Mark the new block as FreeSpace to make sure the heap is iterable
  // while we are capturing stack trace. The block is not yet a fully
  // configured object, and stack traversal needs an iterable heap to
  // find Code objects by pc.
  FreeListNode::FromAddress(addr)->set_size(heap, size);
  ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
  ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));

  Isolate* isolate = heap->isolate();
  int length = 0;
  StackTraceFrameIterator it(isolate);
  while (!it.done() && length < kMaxAllocationTraceLength) {
    JavaScriptFrame* frame = it.frame();
    // The snapshot id of the SharedFunctionInfo serves as a stable
    // function identifier across events.
    SharedFunctionInfo* shared = frame->function()->shared();
    SnapshotObjectId id = ids_->FindEntry(shared->address());
    allocation_trace_buffer_[length++] = id;
    AddFunctionInfo(shared, id);
    it.Advance();
  }
  AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
      Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
  top_node->AddAllocation(size);
}
+
+
// Hash function for the id -> FunctionInfo map.
static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
  return ComputeIntegerHash(static_cast<uint32_t>(id),
                            v8::internal::kZeroHashSeed);
}
+
+
+AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
+ if (entry == NULL) {
+ return NULL;
+ }
+ return reinterpret_cast<FunctionInfo*>(entry->value);
+}
+
+
// Registers a FunctionInfo record for |shared| under |id| unless one
// already exists in the map.
void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
                                        SnapshotObjectId id) {
  HashMap::Entry* entry = id_to_function_info_.Lookup(
      reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
  if (entry->value == NULL) {
    FunctionInfo* info = new FunctionInfo();
    info->name = names_->GetFunctionName(shared->DebugName());
    if (shared->script()->IsScript()) {
      Script* script = Script::cast(shared->script());
      if (script->name()->IsName()) {
        Name* name = Name::cast(script->name());
        info->script_name = names_->GetName(name);
      }
      info->script_id = script->id()->value();
      // Converting start offset into line and column may cause heap
      // allocations so we postpone them until snapshot serialization.
      unresolved_locations_.Add(new UnresolvedLocation(
          script,
          shared->start_position(),
          info));
    }
    entry->value = info;
  }
}
+
+
// Keeps the script alive via a weak global handle. If the script is
// collected before serialization, HandleWeakScript clears script_ and
// Resolve() becomes a no-op.
AllocationTracker::UnresolvedLocation::UnresolvedLocation(
    Script* script, int start, FunctionInfo* info)
    : start_position_(start),
      info_(info) {
  script_ = Handle<Script>::cast(
      script->GetIsolate()->global_handles()->Create(script));
  GlobalHandles::MakeWeak(
      reinterpret_cast<Object**>(script_.location()),
      this, &HandleWeakScript);
}
+
+
// Releases the global handle unless the weak callback already disposed
// of it (in which case script_ is null).
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
  if (!script_.is_null()) {
    script_->GetIsolate()->global_handles()->Destroy(
        reinterpret_cast<Object**>(script_.location()));
  }
}
+
+
// Converts the recorded start offset into a 0-based line and column.
// May allocate on the JS heap, which is why resolution is deferred to
// serialization time (see AddFunctionInfo).
void AllocationTracker::UnresolvedLocation::Resolve() {
  if (script_.is_null()) return;
  info_->line = GetScriptLineNumber(script_, start_position_);
  info_->column = GetScriptColumnNumber(script_, start_position_);
}
+
+
// Weak-handle callback fired when the script is about to be collected:
// forget the handle so Resolve() turns into a no-op, then dispose of
// the persistent handle itself.
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
    v8::Isolate* isolate,
    v8::Persistent<v8::Value>* obj,
    void* data) {
  UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
  location->script_ = Handle<Script>::null();
  obj->Dispose();
}
+
+
+} } // namespace v8::internal
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/allocation-tracker.h Thu Nov 14 12:13:26
2013 UTC
@@ -0,0 +1,138 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_TRACKER_H_
+#define V8_ALLOCATION_TRACKER_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapObjectsMap;
+
+class AllocationTraceTree;
+
// A node of the allocation trace tree. Each node corresponds to one JS
// function (identified by the snapshot id of its SharedFunctionInfo)
// at a particular position in the call stack and accumulates the count
// and total size of allocations attributed to that stack position.
class AllocationTraceNode {
 public:
  AllocationTraceNode(AllocationTraceTree* tree,
                      SnapshotObjectId shared_function_info_id);
  ~AllocationTraceNode();
  // Returns the child for the given function id, or NULL.
  AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
  // Returns the child for the given function id, creating it on demand.
  AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
  // Charges one allocation of |size| bytes to this node.
  void AddAllocation(unsigned size);

  SnapshotObjectId function_id() const { return function_id_; }
  unsigned allocation_size() const { return total_size_; }
  unsigned allocation_count() const { return allocation_count_; }
  unsigned id() const { return id_; }
  Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }

  // Debug dump of this node and its subtree.
  void Print(int indent, AllocationTracker* tracker);

 private:
  AllocationTraceTree* tree_;     // Owning tree; source of node ids.
  SnapshotObjectId function_id_;  // 0 stands for "no function" (root).
  unsigned total_size_;           // Total bytes allocated at this node.
  unsigned allocation_count_;     // Number of allocations recorded.
  unsigned id_;                   // Node id, unique within the tree.
  List<AllocationTraceNode*> children_;

  DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
};
+
+
// The tree of recorded allocation stack traces. Paths are inserted
// from the outermost frame towards the allocation site, so the node
// returned by AddPathFromEnd represents the allocating function.
class AllocationTraceTree {
 public:
  AllocationTraceTree();
  ~AllocationTraceTree();
  // Adds |path| (top frame first) to the tree; returns the deepest node.
  AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
  AllocationTraceNode* root() { return &root_; }
  // Hands out node ids, starting at 1 (the root consumes the first id).
  unsigned next_node_id() { return next_node_id_++; }
  // Debug dump of the whole tree.
  void Print(AllocationTracker* tracker);

 private:
  unsigned next_node_id_;
  AllocationTraceNode root_;  // Sentinel root with function id 0.

  DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree);
};
+
+
// Records stack traces of JS object allocations into an
// AllocationTraceTree and keeps per-function metadata for
// serialization into the heap snapshot JSON.
class AllocationTracker {
 public:
  // Source metadata of a single JS function. line/column stay -1 until
  // PrepareForSerialization() resolves them.
  struct FunctionInfo {
    FunctionInfo();
    const char* name;
    const char* script_name;
    int script_id;
    int line;
    int column;
  };

  // |ids| and |names| are not owned and must outlive the tracker.
  AllocationTracker(HeapObjectsMap* ids, StringsStorage* names);
  ~AllocationTracker();

  // Resolves deferred source positions; may allocate on the JS heap.
  void PrepareForSerialization();
  // Records a single allocation of |size| bytes at |addr|.
  void NewObjectEvent(Address addr, int size);

  AllocationTraceTree* trace_tree() { return &trace_tree_; }
  // Maps SnapshotObjectId (stored as void* key) to FunctionInfo*.
  HashMap* id_to_function_info() { return &id_to_function_info_; }
  FunctionInfo* GetFunctionInfo(SnapshotObjectId id);

 private:
  void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);

  // Holds a weak handle to a script so the line/column of a function
  // can be computed lazily at serialization time.
  class UnresolvedLocation {
   public:
    UnresolvedLocation(Script* script, int start, FunctionInfo* info);
    ~UnresolvedLocation();
    void Resolve();

   private:
    // Weak callback: clears script_ when the script is collected.
    static void HandleWeakScript(v8::Isolate* isolate,
                                 v8::Persistent<v8::Value>* obj,
                                 void* data);
    Handle<Script> script_;
    int start_position_;
    FunctionInfo* info_;
  };
  static void DeleteUnresolvedLocation(UnresolvedLocation** location);

  // Upper bound on the number of frames captured per allocation.
  static const int kMaxAllocationTraceLength = 64;
  HeapObjectsMap* ids_;    // Not owned.
  StringsStorage* names_;  // Not owned.
  AllocationTraceTree trace_tree_;
  // Scratch buffer reused by NewObjectEvent for the captured path.
  SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
  HashMap id_to_function_info_;
  List<UnresolvedLocation*> unresolved_locations_;

  DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_TRACKER_H_
+
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.cc Tue Nov 5
13:46:11 2013 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.cc Thu Nov 14
12:13:26 2013 UTC
@@ -29,6 +29,7 @@
#include "heap-snapshot-generator-inl.h"
+#include "allocation-tracker.h"
#include "code-stubs.h"
#include "heap-profiler.h"
#include "debug.h"
@@ -748,7 +749,8 @@
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
names_(heap),
- ids_(heap) {
+ ids_(heap),
+ allocation_tracker_(NULL) {
}
@@ -758,8 +760,27 @@
HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete allocation_tracker_;
snapshots_.Iterate(DeleteHeapSnapshot);
}
+
+
// Begins tracking heap objects and allocations. The id map is brought
// up to date first so newly recorded objects get fresh ids.
void HeapSnapshotsCollection::StartHeapObjectsTracking() {
  ids_.UpdateHeapObjectsMap();
  // Lazily create the tracker; repeated Start calls reuse the old one.
  if (allocation_tracker_ == NULL) {
    allocation_tracker_ = new AllocationTracker(&ids_, names());
  }
  is_tracking_objects_ = true;
}
+
+
// Stops object tracking and discards the allocation tracker together
// with all recorded traces.
// NOTE(review): is_tracking_objects_ is not reset here — confirm that
// is intended.
void HeapSnapshotsCollection::StopHeapObjectsTracking() {
  ids_.StopHeapObjectsTracking();
  if (allocation_tracker_ != NULL) {
    delete allocation_tracker_;
    allocation_tracker_ = NULL;
  }
}
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
@@ -805,6 +826,15 @@
}
// Called for every newly allocated object: assigns it an id and, if
// allocation tracing is active, records the current stack trace.
void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) {
  DisallowHeapAllocation no_allocation;
  ids_.NewObject(addr, size);
  if (allocation_tracker_ != NULL) {
    allocation_tracker_->NewObjectEvent(addr, size);
  }
}
+
+
size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += names_.GetUsedMemorySize();
@@ -2645,6 +2675,10 @@
const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->collection()->allocation_tracker()) {
+ allocation_tracker->PrepareForSerialization();
+ }
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
SerializeImpl();
@@ -2668,6 +2702,16 @@
SerializeEdges();
if (writer_->aborted()) return;
writer_->AddString("],\n");
+
+ writer_->AddString("\"trace_function_infos\":[");
+ SerializeTraceNodeInfos();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"trace_tree\":[");
+ SerializeTraceTree();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
writer_->AddString("\"strings\":[");
SerializeStrings();
if (writer_->aborted()) return;
@@ -2828,7 +2872,20 @@
JSON_S("shortcut") ","
JSON_S("weak")) ","
JSON_S("string_or_number") ","
- JSON_S("node"))));
+ JSON_S("node")) ","
+ JSON_S("trace_function_info_fields") ":" JSON_A(
+ JSON_S("function_id") ","
+ JSON_S("name") ","
+ JSON_S("script_name") ","
+ JSON_S("script_id") ","
+ JSON_S("line") ","
+ JSON_S("column")) ","
+ JSON_S("trace_node_fields") ":" JSON_A(
+ JSON_S("id") ","
+ JSON_S("function_id") ","
+ JSON_S("count") ","
+ JSON_S("size") ","
+ JSON_S("children"))));
#undef JSON_S
#undef JSON_O
#undef JSON_A
@@ -2836,6 +2893,13 @@
writer_->AddNumber(snapshot_->entries().length());
writer_->AddString(",\"edge_count\":");
writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddString(",\"trace_function_count\":");
+ uint32_t count = 0;
+ AllocationTracker* tracker =
snapshot_->collection()->allocation_tracker();
+ if (tracker) {
+ count = tracker->id_to_function_info()->occupancy();
+ }
+ writer_->AddNumber(count);
}
@@ -2847,6 +2911,100 @@
w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
w->AddCharacter(hex_chars[u & 0xf]);
}
+
+
// Serializes the allocation trace tree, if any, starting at its root.
// Emits nothing when allocation tracking was never started.
void HeapSnapshotJSONSerializer::SerializeTraceTree() {
  AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
  if (!tracker) return;
  AllocationTraceTree* traces = tracker->trace_tree();
  SerializeTraceNode(traces->root());
}
+
+
// Emits one trace node as "id,function_id,count,size,[children...]".
// The fixed-size fields plus the opening '[' are assembled in a stack
// buffer; children are then serialized recursively and the bracket is
// closed afterwards.
void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
  // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0
  const int kBufferSize =
      4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned  // NOLINT
      + 4 + 1 + 1;
  EmbeddedVector<char, kBufferSize> buffer;
  int buffer_pos = 0;
  buffer_pos = utoa(node->id(), buffer, buffer_pos);
  buffer[buffer_pos++] = ',';
  buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
  buffer[buffer_pos++] = ',';
  buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
  buffer[buffer_pos++] = ',';
  buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos);
  buffer[buffer_pos++] = ',';
  buffer[buffer_pos++] = '[';
  buffer[buffer_pos++] = '\0';
  writer_->AddString(buffer.start());

  Vector<AllocationTraceNode*> children = node->children();
  for (int i = 0; i < children.length(); i++) {
    if (i > 0) {
      writer_->AddCharacter(',');
    }
    SerializeTraceNode(children[i]);
  }
  writer_->AddCharacter(']');
}
+
+
// 0-based position is converted to 1-based during the serialization;
// an unresolved position (-1) is emitted as 0.
static int SerializePosition(int position, const Vector<char>& buffer,
                             int buffer_pos) {
  if (position == -1) {
    buffer[buffer_pos++] = '0';
  } else {
    ASSERT(position >= 0);
    buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
  }
  return buffer_pos;
}
+
+
// Emits one "function_id,name,script_name,script_id,line,column" tuple
// per tracked function. Names are emitted as string-table ids. Entries
// after the first are prefixed with a comma inside the same buffer.
void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
  AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
  if (!tracker) return;
  // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
  const int kBufferSize =
      6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned  // NOLINT
      + 6 + 1 + 1;
  EmbeddedVector<char, kBufferSize> buffer;
  HashMap* id_to_function_info = tracker->id_to_function_info();
  bool first_entry = true;
  for (HashMap::Entry* p = id_to_function_info->Start();
       p != NULL;
       p = id_to_function_info->Next(p)) {
    // Keys store SnapshotObjectIds cast to void*; undo that cast.
    SnapshotObjectId id =
        static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
    AllocationTracker::FunctionInfo* info =
        reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
    int buffer_pos = 0;
    if (first_entry) {
      first_entry = false;
    } else {
      buffer[buffer_pos++] = ',';
    }
    buffer_pos = utoa(id, buffer, buffer_pos);
    buffer[buffer_pos++] = ',';
    buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
    buffer[buffer_pos++] = ',';
    buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos);
    buffer[buffer_pos++] = ',';
    // The cast is safe because script id is a non-negative Smi.
    buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer,
                      buffer_pos);
    buffer[buffer_pos++] = ',';
    buffer_pos = SerializePosition(info->line, buffer, buffer_pos);
    buffer[buffer_pos++] = ',';
    buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
    buffer[buffer_pos++] = '\n';
    buffer[buffer_pos++] = '\0';
    writer_->AddString(buffer.start());
  }
}
void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.h Tue Nov 5
13:46:11 2013 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.h Thu Nov 14
12:13:26 2013 UTC
@@ -33,6 +33,8 @@
namespace v8 {
namespace internal {
+class AllocationTracker;
+class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
@@ -296,8 +298,8 @@
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
return ids_.PushHeapObjectsStats(stream);
}
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
+ void StartHeapObjectsTracking();
+ void StopHeapObjectsTracking();
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
@@ -305,6 +307,7 @@
void RemoveSnapshot(HeapSnapshot* snapshot);
StringsStorage* names() { return &names_; }
+ AllocationTracker* allocation_tracker() { return allocation_tracker_; }
SnapshotObjectId FindObjectId(Address object_addr) {
return ids_.FindEntry(object_addr);
@@ -316,7 +319,7 @@
void ObjectMoveEvent(Address from, Address to, int size) {
ids_.MoveObject(from, to, size);
}
- void NewObjectEvent(Address addr, int size) { ids_.NewObject(addr,
size); }
+ void NewObjectEvent(Address addr, int size);
void UpdateObjectSizeEvent(Address addr, int size) {
ids_.UpdateObjectSize(addr, size);
}
@@ -335,6 +338,7 @@
StringsStorage names_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
+ AllocationTracker* allocation_tracker_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
@@ -675,6 +679,9 @@
void SerializeNode(HeapEntry* entry);
void SerializeNodes();
void SerializeSnapshot();
+ void SerializeTraceTree();
+ void SerializeTraceNode(AllocationTraceNode* node);
+ void SerializeTraceNodeInfos();
void SerializeString(const unsigned char* s);
void SerializeStrings();
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap-profiler.cc Tue Nov 5
13:46:11 2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap-profiler.cc Thu Nov 14
12:13:26 2013 UTC
@@ -31,6 +31,7 @@
#include "v8.h"
+#include "allocation-tracker.h"
#include "cctest.h"
#include "hashmap.h"
#include "heap-profiler.h"
@@ -39,6 +40,12 @@
#include "utils-inl.h"
#include "../include/v8-profiler.h"
+using i::AllocationTraceNode;
+using i::AllocationTraceTree;
+using i::AllocationTracker;
+using i::HashMap;
+using i::Vector;
+
namespace {
class NamedEntriesDetector {
@@ -2125,3 +2132,101 @@
node = GetNodeByPath(snapshot, builtin_path, ARRAY_SIZE(builtin_path));
CHECK_NE(NULL, node);
}
+
+
// JS source that generates a width x depth grid of nested functions
// f_i_j, where the second-deepest function allocates via |new|, then
// runs the whole grid 100 times so the allocation tracker records
// predictable stack traces.
static const char* record_trace_tree_source =
"var topFunctions = [];\n"
"var global = this;\n"
"function generateFunctions(width, depth) {\n"
" var script = [];\n"
" for (var i = 0; i < width; i++) {\n"
" for (var j = 0; j < depth; j++) {\n"
" script.push('function f_' + i + '_' + j + '(x) {\\n');\n"
" script.push(' try {\\n');\n"
" if (j < depth-2) {\n"
" script.push(' return f_' + i + '_' + (j+1) + '(x+1);\\n');\n"
" } else if (j == depth - 2) {\n"
" script.push(' return new f_' + i + '_' + (depth - 1) + '();\\n');\n"
" } else if (j == depth - 1) {\n"
" script.push(' this.ts = Date.now();\\n');\n"
" }\n"
" script.push(' } catch (e) {}\\n');\n"
" script.push('}\\n');\n"
" \n"
" }\n"
" }\n"
" var script = script.join('');\n"
" // throw script;\n"
" global.eval(script);\n"
" for (var i = 0; i < width; i++) {\n"
" topFunctions.push(this['f_' + i + '_0']);\n"
" }\n"
"}\n"
"\n"
"var width = 3;\n"
"var depth = 3;\n"
"generateFunctions(width, depth);\n"
"var instances = [];\n"
"function start() {\n"
" for (var i = 0; i < width; i++) {\n"
" instances.push(topFunctions[i](0));\n"
" }\n"
"}\n"
"\n"
"for (var i = 0; i < 100; i++) start();\n";
+
+
// Strips the API wrapper: the public v8::HeapSnapshot is a view over
// the internal i::HeapSnapshot object.
static i::HeapSnapshot* ToInternal(const v8::HeapSnapshot* snapshot) {
  return const_cast<i::HeapSnapshot*>(
      reinterpret_cast<const i::HeapSnapshot*>(snapshot));
}
+
+
// Walks the trace tree from the root following the given chain of
// function names; returns NULL if any name on the path has no
// matching child.
static AllocationTraceNode* FindNode(
    AllocationTracker* tracker, const Vector<const char*>& names) {
  AllocationTraceNode* node = tracker->trace_tree()->root();
  for (int i = 0; node != NULL && i < names.length(); i++) {
    const char* name = names[i];
    Vector<AllocationTraceNode*> children = node->children();
    node = NULL;  // Stays NULL unless a matching child is found below.
    for (int j = 0; j < children.length(); j++) {
      v8::SnapshotObjectId id = children[j]->function_id();
      AllocationTracker::FunctionInfo* info = tracker->GetFunctionInfo(id);
      if (info && strcmp(info->name, name) == 0) {
        node = children[j];
        break;
      }
    }
  }
  return node;
}
+
+
// End-to-end test: runs a script with a known call structure under
// allocation tracking and verifies that the recorded trace tree
// contains the expected call path with plausible counts and sizes.
TEST(TrackHeapAllocations) {
  v8::HandleScope scope(v8::Isolate::GetCurrent());
  LocalContext env;

  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
  heap_profiler->StartRecordingHeapAllocations();

  CompileRun(record_trace_tree_source);

  // Taking a snapshot does not stop the tracker, so it can still be
  // inspected directly afterwards.
  const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(
      v8::String::New("Test"));
  i::HeapSnapshotsCollection* collection = ToInternal(snapshot)->collection();
  AllocationTracker* tracker = collection->allocation_tracker();
  CHECK_NE(NULL, tracker);
  // Resolve all function locations.
  tracker->PrepareForSerialization();
  // Print for better diagnostics in case of failure.
  tracker->trace_tree()->Print(tracker);

  // The script calls start() 100 times, so the constructor frame at the
  // end of the f_0_* chain must account for at least 100 allocations.
  const char* names[] =
      { "(anonymous function)", "start", "f_0_0", "f_0_1", "f_0_2" };
  AllocationTraceNode* node =
      FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
  CHECK_NE(NULL, node);
  CHECK_GE(node->allocation_count(), 100);
  CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
  heap_profiler->StopRecordingHeapAllocations();
}
=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp Thu Nov 7 12:01:26 2013 UTC
+++ /branches/bleeding_edge/tools/gyp/v8.gyp Thu Nov 14 12:13:26 2013 UTC
@@ -217,6 +217,8 @@
'../../src/allocation.h',
'../../src/allocation-site-scopes.cc',
'../../src/allocation-site-scopes.h',
+ '../../src/allocation-tracker.cc',
+ '../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/apiutils.h',
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.