Revision: 21892
Author: [email protected]
Date: Fri Jun 20 07:35:48 2014 UTC
Log: Added --verify-predictable mode for ensuring that GC behaves
deterministically.
To use it, pass verifypredictable=on to make or specify
v8_enable_verify_predictable=1 in GYP_DEFINES.
[email protected]
Review URL: https://codereview.chromium.org/325553002
http://code.google.com/p/v8/source/detail?r=21892
Modified:
/branches/bleeding_edge/Makefile
/branches/bleeding_edge/build/features.gypi
/branches/bleeding_edge/src/builtins.cc
/branches/bleeding_edge/src/d8.cc
/branches/bleeding_edge/src/flag-definitions.h
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/objects-printer.cc
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/serialize.cc
/branches/bleeding_edge/src/serialize.h
/branches/bleeding_edge/src/spaces-inl.h
=======================================
--- /branches/bleeding_edge/Makefile Tue Jun 3 18:10:10 2014 UTC
+++ /branches/bleeding_edge/Makefile Fri Jun 20 07:35:48 2014 UTC
@@ -70,6 +70,10 @@
else
GYPFLAGS += -Dv8_enable_backtrace=1
endif
+# verifypredictable=on
+ifeq ($(verifypredictable), on)
+ GYPFLAGS += -Dv8_enable_verify_predictable=1
+endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
=======================================
--- /branches/bleeding_edge/build/features.gypi Tue Jun 10 10:51:33 2014 UTC
+++ /branches/bleeding_edge/build/features.gypi Fri Jun 20 07:35:48 2014 UTC
@@ -41,6 +41,8 @@
'v8_use_snapshot%': 'true',
+ 'v8_enable_verify_predictable%': 0,
+
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -74,6 +76,9 @@
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
+ ['v8_enable_verify_predictable==1', {
+ 'defines': ['VERIFY_PREDICTABLE',],
+ }],
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
=======================================
--- /branches/bleeding_edge/src/builtins.cc Thu Jun 5 12:14:47 2014 UTC
+++ /branches/bleeding_edge/src/builtins.cc Fri Jun 20 07:35:48 2014 UTC
@@ -239,12 +239,8 @@
FixedArrayBase* new_elms =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
- HeapProfiler* profiler = heap->isolate()->heap_profiler();
- if (profiler->is_tracking_object_moves()) {
- profiler->ObjectMoveEvent(elms->address(),
- new_elms->address(),
- new_elms->Size());
- }
+
+ heap->OnMoveEvent(new_elms, elms, new_elms->Size());
return new_elms;
}
=======================================
--- /branches/bleeding_edge/src/d8.cc Tue Jun 10 10:51:33 2014 UTC
+++ /branches/bleeding_edge/src/d8.cc Fri Jun 20 07:35:48 2014 UTC
@@ -290,9 +290,18 @@
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
+// When FLAG_verify_predictable mode is enabled it returns current value
+// of Heap::allocations_count().
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
- i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
- args.GetReturnValue().Set(delta.InMillisecondsF());
+ if (i::FLAG_verify_predictable) {
+ Isolate* v8_isolate = args.GetIsolate();
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
+ args.GetReturnValue().Set(heap->synthetic_time());
+
+ } else {
+ i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+ args.GetReturnValue().Set(delta.InMillisecondsF());
+ }
}
#endif // !V8_SHARED
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Fri Jun 13 10:50:11 2014 UTC
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Jun 20 07:35:48 2014 UTC
@@ -866,6 +866,24 @@
#endif
#endif
+
+//
+// VERIFY_PREDICTABLE related flags
+//
+#undef FLAG
+
+#ifdef VERIFY_PREDICTABLE
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+DEFINE_bool(verify_predictable, false,
+ "this mode is used for checking that V8 behaves predictably")
+DEFINE_int(dump_allocations_digest_at_alloc, 0,
+ "dump allocations digest each n-th allocation")
+
+
//
// Read-only flags
//
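The #undef FLAG / #define FLAG dance above follows the existing VERIFY_HEAP pattern: in builds without VERIFY_PREDICTABLE the flag is declared through FLAG_READONLY, so FLAG_verify_predictable stays a compile-time constant false and the new checks in the heap code cost nothing. A minimal standalone sketch of that pattern, with simplified, hypothetical macros rather than V8's actual flag machinery:

#include <cstdio>

#ifdef VERIFY_PREDICTABLE
// "Full" flag: a mutable global that a command-line switch could flip.
#define DEFINE_BOOL_FLAG(name, default_value) bool FLAG_##name = default_value;
#else
// "Read-only" flag: a compile-time constant, so guarded code folds away.
#define DEFINE_BOOL_FLAG(name, default_value) \
  static const bool FLAG_##name = default_value;
#endif

DEFINE_BOOL_FLAG(verify_predictable, false)

int main() {
  if (FLAG_verify_predictable) {
    // Only reachable when the build defined VERIFY_PREDICTABLE and the
    // flag was set; otherwise the compiler can drop this branch entirely.
    std::printf("predictable-mode checks enabled\n");
  }
  return 0;
}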
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Wed Jun 18 13:26:02 2014 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Fri Jun 20 07:35:48 2014 UTC
@@ -7,6 +7,7 @@
#include <cmath>
+#include "src/cpu-profiler.h"
#include "src/heap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
@@ -180,7 +181,6 @@
ASSERT(AllowHandleAllocation::IsAllowed());
ASSERT(AllowHeapAllocation::IsAllowed());
ASSERT(gc_state_ == NOT_IN_GC);
- HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
AllowAllocationFailure::IsAllowed(isolate_) &&
@@ -200,8 +200,8 @@
retry_space != NEW_SPACE) {
space = retry_space;
} else {
- if (profiler->is_tracking_allocations() && allocation.To(&object)) {
- profiler->AllocationEvent(object->address(), size_in_bytes);
+ if (allocation.To(&object)) {
+ OnAllocationEvent(object, size_in_bytes);
}
return allocation;
}
@@ -212,7 +212,12 @@
} else if (OLD_DATA_SPACE == space) {
allocation = old_data_space_->AllocateRaw(size_in_bytes);
} else if (CODE_SPACE == space) {
- allocation = code_space_->AllocateRaw(size_in_bytes);
+ if (size_in_bytes <= code_space()->AreaSize()) {
+ allocation = code_space_->AllocateRaw(size_in_bytes);
+ } else {
+ // Large code objects are allocated in large object space.
+ allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+ }
} else if (LO_SPACE == space) {
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
@@ -223,11 +228,96 @@
ASSERT(MAP_SPACE == space);
allocation = map_space_->AllocateRaw(size_in_bytes);
}
- if (allocation.IsRetry()) old_gen_exhausted_ = true;
- if (profiler->is_tracking_allocations() && allocation.To(&object)) {
+ if (allocation.To(&object)) {
+ OnAllocationEvent(object, size_in_bytes);
+ } else {
+ old_gen_exhausted_ = true;
+ }
+ return allocation;
+}
+
+
+void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+ HeapProfiler* profiler = isolate_->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
profiler->AllocationEvent(object->address(), size_in_bytes);
}
- return allocation;
+
+ if (FLAG_verify_predictable) {
+ ++allocations_count_;
+
+ UpdateAllocationsHash(object);
+ UpdateAllocationsHash(size_in_bytes);
+
+ if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+ (--dump_allocations_hash_countdown_ == 0)) {
+ dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ PrintAlloctionsHash();
+ }
+ }
+}
+
+
+void Heap::OnMoveEvent(HeapObject* target,
+ HeapObject* source,
+ int size_in_bytes) {
+ HeapProfiler* heap_profiler = isolate_->heap_profiler();
+ if (heap_profiler->is_tracking_object_moves()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size_in_bytes);
+ }
+
+ if (isolate_->logger()->is_logging_code_events() ||
+ isolate_->cpu_profiler()->is_profiling()) {
+ if (target->IsSharedFunctionInfo()) {
+ PROFILE(isolate_, SharedFunctionInfoMoveEvent(
+ source->address(), target->address()));
+ }
+ }
+
+ if (FLAG_verify_predictable) {
+ ++allocations_count_;
+
+ UpdateAllocationsHash(source);
+ UpdateAllocationsHash(target);
+ UpdateAllocationsHash(size_in_bytes);
+
+ if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+ (--dump_allocations_hash_countdown_ == 0)) {
+ dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ PrintAlloctionsHash();
+ }
+ }
+}
+
+
+void Heap::UpdateAllocationsHash(HeapObject* object) {
+ Address object_address = object->address();
+ MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+ AllocationSpace allocation_space = memory_chunk->owner()->identity();
+
+ STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+ uint32_t value =
+ static_cast<uint32_t>(object_address - memory_chunk->address()) |
+ (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+ UpdateAllocationsHash(value);
+}
+
+
+void Heap::UpdateAllocationsHash(uint32_t value) {
+ uint16_t c1 = static_cast<uint16_t>(value);
+ uint16_t c2 = static_cast<uint16_t>(value >> 16);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+}
+
+
+void Heap::PrintAlloctionsHash() {
+ uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+ PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
}
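The heap-inl.h changes above are the core of the feature: every runtime allocation (OnAllocationEvent) and every object move (OnMoveEvent) feeds UpdateAllocationsHash, which folds the allocation space, the object's offset within its page, and the object size into a running digest using StringHasher. Two --verify-predictable runs of the same script should therefore print identical "### Allocations / hash" lines. The following standalone sketch approximates the digest; it assumes StringHasher::AddCharacterCore/GetHashCore are the usual Jenkins one-at-a-time mixing steps, and the kPageSizeBits constant and event values are made up for illustration:

#include <cstdint>
#include <cstdio>

// Assumed to match StringHasher::AddCharacterCore (one-at-a-time mixing step).
static uint32_t AddCharacterCore(uint32_t hash, uint16_t c) {
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

// Assumed to match StringHasher::GetHashCore (final avalanche).
static uint32_t GetHashCore(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}

// Mirrors Heap::UpdateAllocationsHash(uint32_t): feed one 32-bit value
// into the digest as two 16-bit "characters".
static uint32_t UpdateAllocationsHash(uint32_t hash, uint32_t value) {
  hash = AddCharacterCore(hash, static_cast<uint16_t>(value));
  hash = AddCharacterCore(hash, static_cast<uint16_t>(value >> 16));
  return hash;
}

int main() {
  const uint32_t kPageSizeBits = 20;  // hypothetical page-size bits
  uint32_t hash = 0;
  uint32_t allocations_count = 0;

  // Pretend two allocations happened: (offset within page, space tag, size).
  struct Event { uint32_t offset, space, size; };
  const Event events[] = {{0x40, 0, 32}, {0x80, 1, 64}};
  for (const Event& e : events) {
    ++allocations_count;
    hash = UpdateAllocationsHash(hash, e.offset | (e.space << kPageSizeBits));
    hash = UpdateAllocationsHash(hash, e.size);
  }
  std::printf("### Allocations = %u, hash = 0x%08x\n", allocations_count,
              GetHashCore(hash));
  return 0;
}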
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jun 18 13:26:02 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Fri Jun 20 07:35:48 2014 UTC
@@ -64,7 +64,6 @@
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
- linear_allocation_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
@@ -79,6 +78,9 @@
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
+ allocations_count_(0),
+ raw_allocations_hash_(0),
+ dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
@@ -1957,19 +1959,7 @@
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- Isolate* isolate = heap->isolate();
- HeapProfiler* heap_profiler = isolate->heap_profiler();
- if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(source->address(), target->address(),
- size);
- }
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
- if (target->IsSharedFunctionInfo()) {
- PROFILE(isolate, SharedFunctionInfoMoveEvent(
- source->address(), target->address()));
- }
- }
+ heap->OnMoveEvent(target, source, size);
}
if (marks_handling == TRANSFER_MARKS) {
@@ -2224,6 +2214,7 @@
void Heap::SelectScavengingVisitorsTable() {
bool logging_and_profiling =
+ FLAG_verify_predictable ||
isolate()->logger()->is_logging() ||
isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
@@ -3338,29 +3329,28 @@
}
-AllocationResult Heap::AllocateCode(int object_size,
- bool immovable) {
+AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation;
- // Large code objects and code objects which should stay at a fixed address
- // are allocated in large object space.
+ AllocationResult allocation =
+ AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+
HeapObject* result;
- bool force_lo_space = object_size > code_space()->AreaSize();
- if (force_lo_space) {
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- } else {
- allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
- }
if (!allocation.To(&result)) return allocation;
- if (immovable && !force_lo_space &&
- // Objects on the first page of each space are never moved.
- !code_space_->FirstPage()->Contains(result->address())) {
- // Discard the first code allocation, which was on a page where it could be
- // moved.
- CreateFillerObjectAt(result->address(), object_size);
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- if (!allocation.To(&result)) return allocation;
+ if (immovable) {
+ Address address = result->address();
+ // Code objects which should stay at a fixed address are allocated either
+ // in the first page of code space (objects on the first page of each space
+ // are never moved) or in large object space.
+ if (!code_space_->FirstPage()->Contains(address) &&
+ MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
+ // Discard the first code allocation, which was on a page where it could
+ // be moved.
+ CreateFillerObjectAt(result->address(), object_size);
+ allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+ if (!allocation.To(&result)) return allocation;
+ OnAllocationEvent(result, object_size);
+ }
}
result->set_map_no_write_barrier(code_map());
@@ -3387,15 +3377,10 @@
new_constant_pool = empty_constant_pool_array();
}
+ HeapObject* result;
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- if (obj_size > code_space()->AreaSize()) {
- allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
- } else {
- allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
- }
-
- HeapObject* result;
+ allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
@@ -3444,15 +3429,10 @@
size_t relocation_offset =
static_cast<size_t>(code->instruction_end() - old_addr);
-
- AllocationResult allocation;
- if (new_obj_size > code_space()->AreaSize()) {
- allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
- } else {
- allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
- }
HeapObject* result;
+ AllocationResult allocation =
+ AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
@@ -5258,6 +5238,10 @@
lo_space_->MaximumCommittedMemory());
PrintF("\n\n");
}
+
+ if (FLAG_verify_predictable) {
+ PrintAlloctionsHash();
+ }
TearDownArrayBuffers();
=======================================
--- /branches/bleeding_edge/src/heap.h Wed Jun 18 13:26:02 2014 UTC
+++ /branches/bleeding_edge/src/heap.h Fri Jun 20 07:35:48 2014 UTC
@@ -657,9 +657,6 @@
Address always_allocate_scope_depth_address() {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
- bool linear_allocation() {
- return linear_allocation_scope_depth_ != 0;
- }
Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
@@ -977,6 +974,13 @@
#endif
}
+ // Number of "runtime allocations" done so far.
+ uint32_t allocations_count() { return allocations_count_; }
+
+ // Returns deterministic "time" value in ms. Works only with
+ // FLAG_verify_predictable.
+ double synthetic_time() { return allocations_count_ / 100.0; }
+
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -1437,6 +1441,17 @@
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
+ // This event is triggered after successful allocation of a new object made
+ // by runtime. Allocations of target space for object evacuation do not
+ // trigger the event. In order to track ALL allocations one must turn off
+ // FLAG_inline_new and FLAG_use_allocation_folding.
+ inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+
+ // This event is triggered after object is moved to a new place.
+ inline void OnMoveEvent(HeapObject* target,
+ HeapObject* source,
+ int size_in_bytes);
+
protected:
// Methods made available to tests.
@@ -1526,7 +1541,6 @@
int sweep_generation_;
int always_allocate_scope_depth_;
- int linear_allocation_scope_depth_;
// For keeping track of context disposals.
int contexts_disposed_;
@@ -1552,8 +1566,20 @@
// Returns the amount of external memory registered since last global gc.
int64_t PromotedExternalMemorySize();
- unsigned int ms_count_; // how many mark-sweep collections happened
- unsigned int gc_count_; // how many gc happened
+ // How many "runtime allocations" happened.
+ uint32_t allocations_count_;
+
+ // Running hash over allocations performed.
+ uint32_t raw_allocations_hash_;
+
+ // Countdown counter, dumps allocation hash when 0.
+ uint32_t dump_allocations_hash_countdown_;
+
+ // How many mark-sweep collections happened.
+ unsigned int ms_count_;
+
+ // How many gc happened.
+ unsigned int gc_count_;
// For post mortem debugging.
static const int kRememberedUnmappedPages = 128;
@@ -2063,6 +2089,10 @@
Object** weak_object_to_code_table_address() {
return &weak_object_to_code_table_;
}
+
+ inline void UpdateAllocationsHash(HeapObject* object);
+ inline void UpdateAllocationsHash(uint32_t value);
+ inline void PrintAlloctionsHash();
static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Thu Jun 5 12:14:47 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Fri Jun 20 07:35:48 2014 UTC
@@ -2806,10 +2806,6 @@
AllocationSpace dest) {
Address dst_addr = dst->address();
Address src_addr = src->address();
- HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
- if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
- }
ASSERT(heap()->AllowedToBeMigrated(src, dest));
ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
@@ -2876,6 +2872,7 @@
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
}
+ heap()->OnMoveEvent(dst, src, size);
Memory::Address_at(src_addr) = dst_addr;
}
=======================================
--- /branches/bleeding_edge/src/objects-printer.cc Tue Jun 10 14:01:08 2014 UTC
+++ /branches/bleeding_edge/src/objects-printer.cc Fri Jun 20 07:35:48 2014 UTC
@@ -415,7 +415,7 @@
void JSObject::JSObjectPrint(FILE* out) {
- PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
+ HeapObject::PrintHeader(out, "JSObject");
PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
=======================================
--- /branches/bleeding_edge/src/runtime.cc Tue Jun 17 14:10:16 2014 UTC
+++ /branches/bleeding_edge/src/runtime.cc Fri Jun 20 07:35:48 2014 UTC
@@ -9652,7 +9652,13 @@
// the number in a Date object representing a particular instant in
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
- double millis = std::floor(OS::TimeCurrentMillis());
+ double millis;
+ if (FLAG_verify_predictable) {
+ millis = 1388534400000.0; // Jan 1 2014 00:00:00 GMT+0000
+ millis += std::floor(isolate->heap()->synthetic_time());
+ } else {
+ millis = std::floor(OS::TimeCurrentMillis());
+ }
return *isolate->factory()->NewNumber(millis);
}
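With --verify-predictable enabled, the runtime Date code above stops consulting the OS clock: it returns a fixed base of Jan 1 2014 00:00:00 GMT plus Heap::synthetic_time(), which is simply allocations_count / 100 milliseconds, so the value depends only on allocation activity and is reproducible across runs. A small sketch of that arithmetic, using a hypothetical counter value:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const double kBaseMillis = 1388534400000.0;  // Jan 1 2014 00:00:00 GMT
  uint32_t allocations_count = 12345;          // hypothetical Heap counter
  double synthetic_time = allocations_count / 100.0;  // as in Heap::synthetic_time()
  double millis = kBaseMillis + std::floor(synthetic_time);
  std::printf("synthetic Date value: %.0f\n", millis);  // prints 1388534400123
  return 0;
}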
=======================================
--- /branches/bleeding_edge/src/serialize.cc Tue Jun 10 10:51:33 2014 UTC
+++ /branches/bleeding_edge/src/serialize.cc Fri Jun 20 07:35:48 2014 UTC
@@ -849,6 +849,7 @@
int size = source_->GetInt() << kObjectAlignmentBits;
Address address = Allocate(space_number, size);
HeapObject* obj = HeapObject::FromAddress(address);
+ isolate_->heap()->OnAllocationEvent(obj, size);
*write_back = obj;
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
=======================================
--- /branches/bleeding_edge/src/serialize.h Tue Jun 17 14:24:19 2014 UTC
+++ /branches/bleeding_edge/src/serialize.h Fri Jun 20 07:35:48 2014 UTC
@@ -334,10 +334,6 @@
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
- HeapProfiler* profiler = isolate_->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->AllocationEvent(address, size);
- }
return address;
}
=======================================
--- /branches/bleeding_edge/src/spaces-inl.h Tue Jun 3 08:12:43 2014 UTC
+++ /branches/bleeding_edge/src/spaces-inl.h Fri Jun 20 07:35:48 2014 UTC
@@ -253,26 +253,14 @@
// Raw allocation.
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
- ASSERT(!heap()->linear_allocation() ||
- (anchor_.next_chunk() == &anchor_ &&
- anchor_.prev_chunk() == &anchor_));
-
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
+ if (object == NULL) {
+ object = free_list_.Allocate(size_in_bytes);
+ if (object == NULL) {
+ object = SlowAllocateRaw(size_in_bytes);
}
- return object;
}
- object = SlowAllocateRaw(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);