Reviewers: Vitaly Repeshko,
Message:
Will this fix it, or is there a fundamental reason why we need
isolate-dependent copies? I would like to eventually move these to a
thread-safe startup phase, simplifying them, so making them
isolate-dependent would be a move in the wrong direction.
Description:
Remove counters from generated isolate-independent MemCopy on ia32 platform.
BUG=
TEST=
Please review this at http://codereview.chromium.org/6740019/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files:
M src/ia32/codegen-ia32.cc
M src/v8-counters.h
Index: src/ia32/codegen-ia32.cc
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index
4593b64fba5018dcf8f2c7e5aac4d687a99d3ba0..98f8822ff8759b9b4ad21a9f38d432666e578d68
100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -10177,7 +10177,13 @@ static void MemCopyWrapper(void* dest, const void*
src, size_t size) {
OS::MemCopyFunction CreateMemCopyFunction() {
HandleScope scope;
- MacroAssembler masm(NULL, 1 * KB);
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) return &MemCopyWrapper;
+ MacroAssembler masm(buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10233,7 +10239,6 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
- __ IncrementCounter(masm.isolate()->counters()->memcopy_aligned(),
1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
@@ -10281,7 +10286,6 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ IncrementCounter(masm.isolate()->counters()->memcopy_unaligned(),
1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
@@ -10325,7 +10329,6 @@ OS::MemCopyFunction CreateMemCopyFunction() {
}
} else {
- __ IncrementCounter(masm.isolate()->counters()->memcopy_noxmm(), 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
@@ -10372,13 +10375,8 @@ OS::MemCopyFunction CreateMemCopyFunction() {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- // Copy the generated code into an executable chunk and return a pointer
- // to the first instruction in it as a C++ function pointer.
- LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size,
EXECUTABLE);
- if (chunk == NULL) return &MemCopyWrapper;
- memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(chunk->GetStartAddress());
+ CPU::FlushICache(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
#undef __
Index: src/v8-counters.h
diff --git a/src/v8-counters.h b/src/v8-counters.h
index
04482e80499e8d3902882ad683657c03864c33c0..5e765b277f6e3e7101d0b1605e12f76f55f24f24
100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -202,9 +202,6 @@ namespace internal {
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
- SC(memcopy_aligned, V8.MemCopyAligned) \
- SC(memcopy_unaligned, V8.MemCopyUnaligned) \
- SC(memcopy_noxmm, V8.MemCopyNoXMM) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev