Revision: 17392
Author: [email protected]
Date: Fri Oct 25 11:10:28 2013 UTC
Log: Define DEBUG for v8_optimized_debug=2
Thereby ensuring there is only a minimal performance regression vs. NDEBUG
(now only about 10% slower, rather than ~2x).
[email protected], [email protected]
Review URL: https://codereview.chromium.org/39183004
http://code.google.com/p/v8/source/detail?r=17392
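
For context, a minimal sketch of the assert tiers this change creates. The
macro names come from the checks.h hunk below; everything else is
illustrative, not part of the patch:

  // Build with -DDEBUG for full debug, with -DDEBUG -DOPTIMIZED_DEBUG
  // for v8_optimized_debug=2, and with neither for release.
  #include <cassert>
  #include <cstdio>

  #ifdef DEBUG
  #ifndef OPTIMIZED_DEBUG
  #define ENABLE_SLOW_ASSERTS 1
  #endif
  #endif

  #ifdef ENABLE_SLOW_ASSERTS
  // Stands in for the real runtime flag FLAG_enable_slow_asserts.
  static const bool slow_asserts_on = true;
  #define SLOW_ASSERT(cond) assert(!slow_asserts_on || (cond))
  #else
  #define SLOW_ASSERT(cond) ((void) 0)  // zero cost in optimized debug
  #endif

  int main() {
  #ifdef DEBUG
    std::printf("cheap ASSERTs stay enabled (the ~10%% cost)\n");
  #endif
    SLOW_ASSERT(1 + 1 == 2);  // compiled out unless ENABLE_SLOW_ASSERTS
    return 0;
  }
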
Modified:
/branches/bleeding_edge/build/toolchain.gypi
/branches/bleeding_edge/src/ast.cc
/branches/bleeding_edge/src/checks.cc
/branches/bleeding_edge/src/checks.h
/branches/bleeding_edge/src/contexts.cc
/branches/bleeding_edge/src/conversions-inl.h
/branches/bleeding_edge/src/deoptimizer.h
/branches/bleeding_edge/src/elements.cc
/branches/bleeding_edge/src/flag-definitions.h
/branches/bleeding_edge/src/incremental-marking.cc
/branches/bleeding_edge/src/list.h
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/utils.h
=======================================
--- /branches/bleeding_edge/build/toolchain.gypi Mon Oct 14 13:07:41 2013 UTC
+++ /branches/bleeding_edge/build/toolchain.gypi Fri Oct 25 11:10:28 2013 UTC
@@ -436,6 +436,7 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
+ 'DEBUG'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -503,15 +504,6 @@
},
},
'conditions': [
- ['v8_optimized_debug==2', {
- 'defines': [
- 'NDEBUG',
- ],
- }, {
- 'defines': [
- 'DEBUG',
- ],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or
OS=="netbsd"', {
'cflags':
[ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -553,6 +545,9 @@
'-fdata-sections',
'-ffunction-sections',
],
+ 'defines': [
+ 'OPTIMIZED_DEBUG'
+ ],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
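
Net effect of the toolchain.gypi hunks: debug builds now always define
DEBUG, and v8_optimized_debug==2 additionally defines OPTIMIZED_DEBUG
instead of NDEBUG. A sanity-check translation unit (illustrative, not from
the patch) that should compile in every mode after this change:

  #if defined(OPTIMIZED_DEBUG) && !defined(DEBUG)
  #error "OPTIMIZED_DEBUG is only ever set alongside DEBUG"
  #endif
  #if defined(DEBUG) && defined(NDEBUG)
  #error "v8_optimized_debug==2 no longer defines NDEBUG"
  #endif
  int main() { return 0; }
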
=======================================
--- /branches/bleeding_edge/src/ast.cc Wed Oct 23 10:41:21 2013 UTC
+++ /branches/bleeding_edge/src/ast.cc Fri Oct 25 11:10:28 2013 UTC
@@ -627,7 +627,7 @@
holder_ = GetPrototypeForPrimitiveCheck(check_type_,
oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
=======================================
--- /branches/bleeding_edge/src/checks.cc Mon Sep 23 14:11:59 2013 UTC
+++ /branches/bleeding_edge/src/checks.cc Fri Oct 25 11:10:28 2013 UTC
@@ -128,8 +128,6 @@
namespace v8 { namespace internal {
-
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
=======================================
--- /branches/bleeding_edge/src/checks.h Thu Sep 12 12:08:54 2013 UTC
+++ /branches/bleeding_edge/src/checks.h Fri Oct 25 11:10:28 2013 UTC
@@ -272,7 +272,24 @@
#endif
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -294,7 +310,6 @@
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
=======================================
--- /branches/bleeding_edge/src/contexts.cc Wed Sep 4 13:53:24 2013 UTC
+++ /branches/bleeding_edge/src/contexts.cc Fri Oct 25 11:10:28 2013 UTC
@@ -259,7 +259,7 @@
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
=======================================
--- /branches/bleeding_edge/src/conversions-inl.h Fri Jul 19 09:57:35 2013 UTC
+++ /branches/bleeding_edge/src/conversions-inl.h Fri Oct 25 11:10:28 2013 UTC
@@ -355,7 +355,7 @@
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
=======================================
--- /branches/bleeding_edge/src/deoptimizer.h Wed Oct 16 03:30:06 2013 UTC
+++ /branches/bleeding_edge/src/deoptimizer.h Fri Oct 25 11:10:28 2013 UTC
@@ -506,7 +506,15 @@
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+ ASSERT(false);
+ return 0;
+ }
+#endif
return registers_[n];
}
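
A standalone sketch of the same workaround (Frame and the array size are
mine; only the shape of the fix comes from the hunk above). With a plain
assert followed by the array access, gcc at -O2 can report a spurious
array bounds overflow; bailing out explicitly makes the index provably in
range on the path that reaches it:

  #include <assert.h>
  #include <stdint.h>

  struct Frame {
    intptr_t registers_[16];

    intptr_t GetRegister(unsigned n) const {
  #ifdef DEBUG
      // Early return instead of a bare assert: the access below is now
      // provably within bounds, silencing gcc's false positive.
      if (n >= sizeof(registers_) / sizeof(registers_[0])) {
        assert(false);
        return 0;
      }
  #endif
      return registers_[n];
    }
  };

  int main() { Frame f = {}; return static_cast<int>(f.GetRegister(0)); }
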
=======================================
--- /branches/bleeding_edge/src/elements.cc Tue Sep 10 14:30:36 2013 UTC
+++ /branches/bleeding_edge/src/elements.cc Fri Oct 25 11:10:28 2013 UTC
@@ -792,7 +792,7 @@
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Tue Oct 22 17:03:57 2013 UTC
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Oct 25 11:10:28 2013 UTC
@@ -696,8 +696,10 @@
#endif
// checks.cc
+#ifndef OPTIMIZED_DEBUG
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
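
Why the new guard, as I read it: when OPTIMIZED_DEBUG is set, checks.h
above already provides FLAG_enable_slow_asserts as a compile-time constant
("const bool FLAG_enable_slow_asserts = false;"), so letting DEFINE_bool
also emit the flag under the same name would clash with that definition.
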
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Fri Oct 4 07:25:24 2013 UTC
+++ /branches/bleeding_edge/src/incremental-marking.cc Fri Oct 25 11:10:28 2013 UTC
@@ -728,7 +728,7 @@
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if ENABLE_SLOW_ASSERTS
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
=======================================
--- /branches/bleeding_edge/src/list.h Mon Sep 2 11:39:23 2013 UTC
+++ /branches/bleeding_edge/src/list.h Fri Oct 25 11:10:28 2013 UTC
@@ -84,7 +84,7 @@
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
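
These accessor checks sit on extremely hot paths (every bounds-checked
element read), which is presumably where much of the old ~2x debug
slowdown came from. The resulting pattern, paraphrased (TinyList is
illustrative, not V8 code):

  #include "checks.h"  // ASSERT, SLOW_ASSERT

  template <typename T>
  struct TinyList {
    T* data_;
    int length_;

    T& operator[](int i) const {
      ASSERT(0 <= i);            // cheap: stays on in all debug builds
      SLOW_ASSERT(i < length_);  // hot: compiled out under OPTIMIZED_DEBUG
      return data_[i];
    }
  };
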
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Mon Oct 14 13:35:06 2013 UTC
+++ /branches/bleeding_edge/src/objects-inl.h Fri Oct 25 11:10:28 2013 UTC
@@ -80,7 +80,7 @@
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -1190,7 +1190,7 @@
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
+ SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1307,7 +1307,7 @@
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1901,7 +1901,7 @@
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
=======================================
--- /branches/bleeding_edge/src/objects.cc Fri Oct 25 09:58:21 2013 UTC
+++ /branches/bleeding_edge/src/objects.cc Fri Oct 25 11:10:28 2013 UTC
@@ -1196,7 +1196,7 @@
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1253,7 +1253,7 @@
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -4483,7 +4483,7 @@
Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
@@ -7828,7 +7828,7 @@
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7846,7 +7846,7 @@
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -8901,7 +8901,7 @@
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
=======================================
--- /branches/bleeding_edge/src/utils.h Mon Sep 23 18:57:32 2013 UTC
+++ /branches/bleeding_edge/src/utils.h Fri Oct 25 11:10:28 2013 UTC
@@ -419,8 +419,8 @@
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}