Reviewers: Michael Starzinger,
Message:
PTAL
Description:
Make v8_optimized_debug=1 almost as fast as v8_optimized_debug=2
[email protected]
Please review this at https://codereview.chromium.org/39183004/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+56, -27 lines):
M build/toolchain.gypi
M src/ast.cc
M src/checks.h
M src/checks.cc
M src/contexts.cc
M src/conversions-inl.h
M src/deoptimizer.h
M src/elements.cc
M src/flag-definitions.h
M src/incremental-marking.cc
M src/list.h
M src/msan.h
M src/objects-inl.h
M src/objects.cc
M src/store-buffer.cc
M src/utils.h
Index: build/toolchain.gypi
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index
e1903f300fd757090f123a16f7456880b9779748..02dc2bce7eba3e577faf428fa9c0763970c0a8c0
100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -533,15 +533,18 @@
['v8_optimized_debug==1', {
'cflags!': [
'-O0',
- '-O3', # TODO(2807) should be -O1.
+ '-O1',
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
- '-O1', # TODO(2807) should be -O3.
+ '-O3'
],
+ 'defines': [
+ 'OPTIMIZED_DEBUG'
+ ]
}],
['v8_optimized_debug==2', {
'cflags!': [
Index: src/ast.cc
diff --git a/src/ast.cc b/src/ast.cc
index 481414eb2c2802f0d0922fb6b2f31c8e07361954..843f8c89600d66d5e201064d75643fa6802a04c8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -627,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
holder_ = GetPrototypeForPrimitiveCheck(check_type_,
oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
Index: src/checks.cc
diff --git a/src/checks.cc b/src/checks.cc
index d2a107c227694646c35c0538f26fffdd80cf3921..e08cd7c6856143a84f91ed3553ac3c6e73264ebe 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -129,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
Index: src/checks.h
diff --git a/src/checks.h b/src/checks.h
index f5c5f232bd5e3e12afb8b8c0ec0e9154477080dd..d4c6039e3aa7f376fa40b6360191c4a497cf913e 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -272,7 +272,21 @@ template <int> class StaticAssertionHelper { };
#endif
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#if ENABLE_SLOW_ASSERTS
extern bool FLAG_enable_slow_asserts;
+#else
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +299,12 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+#endif
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
Index: src/contexts.cc
diff --git a/src/contexts.cc b/src/contexts.cc
index 441ef9d9c32b6a70b7af5440f1a61fc1e8310c1b..710d30aa8ec2a4138da312750667d8e7b87b3409 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
Index: src/conversions-inl.h
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 2f0a399d1a4d951296f933a35a3a40e90f954693..7ba19ba0f1d571657f51f68865dc24b700fc8ef3 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos),
exponent);
Index: src/deoptimizer.h
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 706d1f0523eb026b96fc0f0011aaf6bafcdea170..4e9d281ea52f762305a3435b364b88674dd2f55f 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -506,7 +506,15 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+ ASSERT(false);
+ return 0;
+ }
+#endif
return registers_[n];
}
Index: src/elements.cc
diff --git a/src/elements.cc b/src/elements.cc
index 89621cb3694ad63288f5b1f9556326da5e02dcc5..0b745c4505fc2fa40b32bc472eb2f405f4eefed5 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
Index: src/flag-definitions.h
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index aa889f3fd905aa5838a47936ebc1fad4ac272676..ae9c36e7493b158e21bcf0704dd33c1863e9cffe 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -696,8 +696,10 @@ DEFINE_bool(stress_compaction, false,
#endif
// checks.cc
+#if !defined(OPTIMIZED_DEBUG)
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
Index: src/incremental-marking.cc
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 49936d7ce8fe2a5f651dc4636d5a82f26bc2b8a8..724c2f868e8c78273342e56068bd30d6ccdd36f0 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -728,7 +728,7 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if defined(DEBUG) && !defined(OPTIMIZED_DEBUG)
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
Index: src/list.h
diff --git a/src/list.h b/src/list.h
index 0e4e35bb41b78b3ff5aa8b106ea284d1e4ded91c..41666deb2678f47520ef64fc63691cf3676669a2 100644
--- a/src/list.h
+++ b/src/list.h
@@ -84,7 +84,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
Index: src/msan.h
diff --git a/src/msan.h b/src/msan.h
index 3bb746d7fa483951a987f6a2f37978ed031d1cd0..484c9fa39797187597e534ca14ca8e89fd990135 100644
--- a/src/msan.h
+++ b/src/msan.h
@@ -34,7 +34,7 @@
# define __has_feature(x) 0
#endif
-#if __has_feature(memory_sanitizer)
+#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
# define MEMORY_SANITIZER
#endif
Index: src/objects-inl.h
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 006aff394c20cd517c516e0823424f3aa42c9c80..11abf4d8138a330db3230347db5116dc7a59de53 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -80,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -1190,7 +1190,7 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
+ SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1307,7 +1307,7 @@ FixedArrayBase* JSObject::elements() {
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1901,7 +1901,7 @@ FixedArrayBase* FixedArrayBase::cast(Object* object) {
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Index: src/objects.cc
diff --git a/src/objects.cc b/src/objects.cc
index 2889014b46f8087986cb8ab89ec70d250f86b50b..aea3e07a7336361507e4c2b503e615655d3605ec 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1196,7 +1196,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1253,7 +1253,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -4483,7 +4483,7 @@ Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
Map::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
@@ -7844,7 +7844,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7862,7 +7862,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -8917,7 +8917,7 @@ bool String::SlowEquals(String* other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
Index: src/store-buffer.cc
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 22a546742c85f376839ef68528728cd69bb3cc53..51fd42a4f12329715f3e4320391abba7f6b22514 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -310,7 +310,6 @@ void StoreBuffer::Clean() {
static Address* in_store_buffer_1_element_cache = NULL;
-
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
if (!FLAG_enable_slow_asserts) return true;
if (in_store_buffer_1_element_cache != NULL &&
Index: src/utils.h
diff --git a/src/utils.h b/src/utils.h
index 4a08319044bf7f0768970f539a1a41ac3f298747..062019af4604f0288e1ad2802baba825d75c28ef 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -419,8 +419,8 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.