Author: [email protected]
Date: Tue Mar 24 05:47:53 2009
New Revision: 1595

Modified:
    branches/bleeding_edge/src/flag-definitions.h
    branches/bleeding_edge/src/globals.h
    branches/bleeding_edge/src/heap.cc
    branches/bleeding_edge/src/heap.h
    branches/bleeding_edge/src/log.cc
    branches/bleeding_edge/src/platform-freebsd.cc
    branches/bleeding_edge/src/platform-linux.cc
    branches/bleeding_edge/src/platform-macos.cc
    branches/bleeding_edge/src/platform-nullos.cc
    branches/bleeding_edge/src/platform-win32.cc
    branches/bleeding_edge/src/platform.h
    branches/bleeding_edge/src/spaces-inl.h
    branches/bleeding_edge/src/spaces.cc
    branches/bleeding_edge/src/spaces.h
    branches/bleeding_edge/tools/test.py

Log:
Add basic infrastructure for protecting V8's heap when leaving the VM
and unprotecting it when (re)entering.  The functionality is enabled
by the flag --protect-heap and requires V8 to be built with
ENABLE_HEAP_PROTECTION and ENABLE_LOGGING_AND_PROFILING defined.

Implemented on Linux and Windows but not yet for other platforms.

Review URL: http://codereview.chromium.org/53004
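
For background, the mechanism boils down to page-level protection: mark the heap's
pages read-only when control leaves V8 (state becomes EXTERNAL) and restore write
-- and, for code spaces, execute -- access when control re-enters. The following
standalone sketch is not part of the patch; it is an illustrative, Linux-only toy
(the main() harness and variable names are invented here) showing the same
mprotect pattern that the new OS::Protect/OS::Unprotect in platform-linux.cc use:

    // Toy illustration of the protect/unprotect cycle (not V8 code).
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstdio>

    int main() {
      const size_t size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // One page of read/write memory, allocated the same way OS::Allocate does.
      void* block = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(block != MAP_FAILED);

      static_cast<char*>(block)[0] = 42;   // writable while "inside the VM"

      // Leaving the VM: drop to read-only, as OS::Protect does.
      mprotect(block, size, PROT_READ);
      // A write at this point would fault with SIGSEGV, catching stray writes
      // into the heap from external code.

      // (Re)entering the VM: restore read/write access, as OS::Unprotect does
      // (with PROT_EXEC added for executable spaces).
      mprotect(block, size, PROT_READ | PROT_WRITE);
      static_cast<char*>(block)[0] = 43;   // writable again

      printf("value: %d\n", static_cast<char*>(block)[0]);
      munmap(block, size);
      return 0;
    }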

Modified: branches/bleeding_edge/src/flag-definitions.h
==============================================================================
--- branches/bleeding_edge/src/flag-definitions.h       (original)
+++ branches/bleeding_edge/src/flag-definitions.h       Tue Mar 24 05:47:53 2009
@@ -337,8 +337,20 @@
  DEFINE_bool(sliding_state_window, false,
              "Update sliding state window counters.")
  DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
-DEFINE_bool(oprofile, false,
-            "Enable JIT agent for OProfile.")
+DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
+
+//
+// Heap protection flags
+// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
+//
+#ifdef ENABLE_HEAP_PROTECTION
+#undef FLAG
+#define FLAG FLAG_FULL
+
+DEFINE_bool(protect_heap, false,
+            "Protect/unprotect V8's heap when leaving/entring the VM.")
+
+#endif

  //
  // Disassembler only flags

Modified: branches/bleeding_edge/src/globals.h
==============================================================================
--- branches/bleeding_edge/src/globals.h        (original)
+++ branches/bleeding_edge/src/globals.h        Tue Mar 24 05:47:53 2009
@@ -359,7 +359,8 @@
    V(JS)                   \
    V(GC)                   \
    V(COMPILER)             \
-  V(OTHER)
+  V(OTHER)                \
+  V(EXTERNAL)

  enum StateTag {
  #define DEF_STATE_TAG(name) name,

Modified: branches/bleeding_edge/src/heap.cc
==============================================================================
--- branches/bleeding_edge/src/heap.cc  (original)
+++ branches/bleeding_edge/src/heap.cc  Tue Mar 24 05:47:53 2009
@@ -2861,6 +2861,30 @@
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void Heap::Protect() {
+  new_space_.Protect();
+  map_space_->Protect();
+  old_pointer_space_->Protect();
+  old_data_space_->Protect();
+  code_space_->Protect();
+  lo_space_->Protect();
+}
+
+
+void Heap::Unprotect() {
+  new_space_.Unprotect();
+  map_space_->Unprotect();
+  old_pointer_space_->Unprotect();
+  old_data_space_->Unprotect();
+  code_space_->Unprotect();
+  lo_space_->Unprotect();
+}
+
+#endif
+
+
  #ifdef DEBUG

  class PrintHandleVisitor: public ObjectVisitor {

Modified: branches/bleeding_edge/src/heap.h
==============================================================================
--- branches/bleeding_edge/src/heap.h   (original)
+++ branches/bleeding_edge/src/heap.h   Tue Mar 24 05:47:53 2009
@@ -273,6 +273,12 @@
      return new_space_.allocation_limit_address();
    }

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the heap by marking all spaces read-only/writable.
+  static void Protect();
+  static void Unprotect();
+#endif
+
    // Allocates and initializes a new JavaScript object based on a
    // constructor.
    // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation

Modified: branches/bleeding_edge/src/log.cc
==============================================================================
--- branches/bleeding_edge/src/log.cc   (original)
+++ branches/bleeding_edge/src/log.cc   Tue Mar 24 05:47:53 2009
@@ -29,10 +29,12 @@

  #include "v8.h"

+#include "bootstrapper.h"
  #include "log.h"
+#include "macro-assembler.h"
  #include "platform.h"
+#include "serialize.h"
  #include "string-stream.h"
-#include "macro-assembler.h"

  namespace v8 { namespace internal {

@@ -1115,10 +1117,23 @@

    if (FLAG_log_state_changes) {
      LOG(UncheckedStringEvent("Entering", StateToString(state_)));
-    if (previous_) {
+    if (previous_ != NULL) {
        LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
      }
    }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap && previous_ != NULL) {
+    if (state_ == EXTERNAL) {
+      // We are leaving V8.
+      ASSERT(previous_ == NULL || previous_->state_ != EXTERNAL);
+      Heap::Protect();
+    } else {
+      // Are we entering V8?
+      if (previous_->state_ == EXTERNAL) Heap::Unprotect();
+    }
+  }
+#endif
  }


@@ -1127,10 +1142,22 @@

    if (FLAG_log_state_changes) {
      LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
-    if (previous_) {
+    if (previous_ != NULL) {
        LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
      }
    }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap && previous_ != NULL) {
+    if (state_ == EXTERNAL) {
+      // Are we (re)entering V8?
+      if (previous_->state_ != EXTERNAL) Heap::Unprotect();
+    } else {
+      // Are we leaving V8?
+      if (previous_->state_ == EXTERNAL) Heap::Protect();
+    }
+  }
+#endif
  }
  #endif


Modified: branches/bleeding_edge/src/platform-freebsd.cc
==============================================================================
--- branches/bleeding_edge/src/platform-freebsd.cc      (original)
+++ branches/bleeding_edge/src/platform-freebsd.cc      Tue Mar 24 05:47:53 2009
@@ -257,6 +257,20 @@
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
  void OS::Sleep(int milliseconds) {
    unsigned int ms = static_cast<unsigned int>(milliseconds);
    usleep(1000 * ms);

Modified: branches/bleeding_edge/src/platform-linux.cc
==============================================================================
--- branches/bleeding_edge/src/platform-linux.cc        (original)
+++ branches/bleeding_edge/src/platform-linux.cc        Tue Mar 24 05:47:53 2009
@@ -234,9 +234,9 @@

  void* OS::Allocate(const size_t requested,
                     size_t* allocated,
-                   bool executable) {
+                   bool is_executable) {
    const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mbase == MAP_FAILED) {
      LOG(StringEvent("OS::Allocate", "mmap failed"));
@@ -248,12 +248,29 @@
  }


-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
    // TODO(1240712): munmap has a return value which is ignored here.
-  munmap(buf, length);
+  munmap(address, size);
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  mprotect(address, size, prot);
+}
+
+#endif
+
+
  void OS::Sleep(int milliseconds) {
    unsigned int ms = static_cast<unsigned int>(milliseconds);
    usleep(1000 * ms);
@@ -267,7 +284,7 @@


  void OS::DebugBreak() {
-#if defined (__arm__) || defined(__thumb__)
+#ifdef ARM
    asm("bkpt 0");
  #else
    asm("int $3");
@@ -418,8 +435,8 @@
  }


-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    if (MAP_FAILED == mmap(address, size, prot,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                           kMmapFd, kMmapFdOffset)) {

Modified: branches/bleeding_edge/src/platform-macos.cc
==============================================================================
--- branches/bleeding_edge/src/platform-macos.cc        (original)
+++ branches/bleeding_edge/src/platform-macos.cc        Tue Mar 24 05:47:53 2009
@@ -228,9 +228,9 @@

  void* OS::Allocate(const size_t requested,
                     size_t* allocated,
-                   bool executable) {
+                   bool is_executable) {
    const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (mbase == MAP_FAILED) {
      LOG(StringEvent("OS::Allocate", "mmap failed"));
@@ -242,12 +242,26 @@
  }


-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
    // TODO(1240712): munmap has a return value which is ignored here.
-  munmap(buf, length);
+  munmap(address, size);
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
  void OS::Sleep(int milliseconds) {
    usleep(1000 * milliseconds);
  }
@@ -370,8 +384,8 @@
  }


-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    if (MAP_FAILED == mmap(address, size, prot,
                           MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                           kMmapFd, kMmapFdOffset)) {
@@ -388,6 +402,7 @@
                MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                kMmapFd, kMmapFdOffset) != MAP_FAILED;
  }
+

  class ThreadHandle::PlatformData : public Malloced {
   public:

Modified: branches/bleeding_edge/src/platform-nullos.cc
==============================================================================
--- branches/bleeding_edge/src/platform-nullos.cc       (original)
+++ branches/bleeding_edge/src/platform-nullos.cc       Tue Mar 24 05:47:53 2009
@@ -173,6 +173,20 @@
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
  void OS::Sleep(int milliseconds) {
    UNIMPLEMENTED();
  }

Modified: branches/bleeding_edge/src/platform-win32.cc
==============================================================================
--- branches/bleeding_edge/src/platform-win32.cc        (original)
+++ branches/bleeding_edge/src/platform-win32.cc        Tue Mar 24 05:47:53 2009
@@ -801,12 +801,12 @@

  void* OS::Allocate(const size_t requested,
                     size_t* allocated,
-                   bool executable) {
+                   bool is_executable) {
    // VirtualAlloc rounds allocated size to page size automatically.
    size_t msize = RoundUp(requested, GetPageSize());

    // Windows XP SP2 allows Data Execution Prevention (DEP).
-  int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    LPVOID mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
    if (mbase == NULL) {
      LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
@@ -821,13 +821,32 @@
  }


-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
    // TODO(1240712): VirtualFree has a return value which is ignored here.
-  VirtualFree(buf, 0, MEM_RELEASE);
-  USE(length);
+  VirtualFree(address, 0, MEM_RELEASE);
+  USE(size);
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  // TODO(1240712): VirtualProtect has a return value which is ignored here.
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_READONLY, &old_protect);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  // TODO(1240712): VirtualProtect has a return value which is ignored here.
+  DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  DWORD old_protect;
+  VirtualProtect(address, size, new_protect, &old_protect);
+}
+
+#endif
+
+
  void OS::Sleep(int milliseconds) {
    ::Sleep(milliseconds);
  }
@@ -1299,8 +1318,8 @@
  }


-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
      return false;
    }

Modified: branches/bleeding_edge/src/platform.h
==============================================================================
--- branches/bleeding_edge/src/platform.h       (original)
+++ branches/bleeding_edge/src/platform.h       Tue Mar 24 05:47:53 2009
@@ -168,11 +168,17 @@
    // Returns the address of allocated memory, or NULL if failed.
    static void* Allocate(const size_t requested,
                          size_t* allocated,
-                        bool executable);
-  static void Free(void* buf, const size_t length);
+                        bool is_executable);
+  static void Free(void* address, const size_t size);
    // Get the Alignment guaranteed by Allocate().
    static size_t AllocateAlignment();

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect a block of memory by marking it read-only/writable.
+  static void Protect(void* address, size_t size);
+  static void Unprotect(void* address, size_t size, bool is_executable);
+#endif
+
    // Returns an indication of whether a pointer is in a space that
    // has been allocated by Allocate().  This method may conservatively
    // always return false, but giving more accurate information may
@@ -267,7 +273,7 @@
    size_t size() { return size_; }

    // Commits real memory. Returns whether the operation succeeded.
-  bool Commit(void* address, size_t size, bool executable);
+  bool Commit(void* address, size_t size, bool is_executable);

    // Uncommit real memory.  Returns whether the operation succeeded.
    bool Uncommit(void* address, size_t size);

Modified: branches/bleeding_edge/src/spaces-inl.h
==============================================================================
--- branches/bleeding_edge/src/spaces-inl.h     (original)
+++ branches/bleeding_edge/src/spaces-inl.h     Tue Mar 24 05:47:53 2009
@@ -219,6 +219,43 @@
  }


+bool MemoryAllocator::InInitialChunk(Address address) {
+  if (initial_chunk_ == NULL) return false;
+
+  Address start = static_cast<Address>(initial_chunk_->address());
+  return (start <= address) && (address < start + initial_chunk_->size());
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  OS::Protect(start, size);
+}
+
+
+void MemoryAllocator::Unprotect(Address start,
+                                size_t size,
+                                Executability executable) {
+  OS::Unprotect(start, size, executable);
+}
+
+
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Protect(chunks_[id].address(), chunks_[id].size());
+}
+
+
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                chunks_[id].owner()->executable() == EXECUTABLE);
+}
+
+#endif
+
+
  // --------------------------------------------------------------------------
  // PagedSpace


Modified: branches/bleeding_edge/src/spaces.cc
==============================================================================
--- branches/bleeding_edge/src/spaces.cc        (original)
+++ branches/bleeding_edge/src/spaces.cc        Tue Mar 24 05:47:53 2009
@@ -302,9 +302,8 @@
    *num_pages = PagesInChunk(start, size);
    ASSERT(*num_pages > 0);
    ASSERT(initial_chunk_ != NULL);
-  ASSERT(initial_chunk_->address() <= start);
-  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
-                             + initial_chunk_->size());
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
    if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
      return Page::FromAddress(NULL);
    }
@@ -325,9 +324,8 @@
    ASSERT(start != NULL);
    ASSERT(size > 0);
    ASSERT(initial_chunk_ != NULL);
-  ASSERT(initial_chunk_->address() <= start);
-  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
-                             + initial_chunk_->size());
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));

    if (!initial_chunk_->Commit(start, size, executable)) return false;
    Counters::memory_allocated.Increment(size);
@@ -407,14 +405,7 @@
    // We cannot free a chunk contained in the initial chunk because it was not
    // allocated with AllocateRawMemory.  Instead we uncommit the virtual
    // memory.
-  bool in_initial_chunk = false;
-  if (initial_chunk_ != NULL) {
-    Address start = static_cast<Address>(initial_chunk_->address());
-    Address end = start + initial_chunk_->size();
-    in_initial_chunk = (start <= c.address()) && (c.address() < end);
-  }
-
-  if (in_initial_chunk) {
+  if (InInitialChunk(c.address())) {
      // TODO(1240712): VirtualMemory::Uncommit has a return value which
      // is ignored here.
      initial_chunk_->Uncommit(c.address(), c.size());
@@ -529,6 +520,28 @@
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void PagedSpace::Protect() {
+  Page* page = first_page_;
+  while (page->is_valid()) {
+    MemoryAllocator::ProtectChunkFromPage(page);
+    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+  }
+}
+
+
+void PagedSpace::Unprotect() {
+  Page* page = first_page_;
+  while (page->is_valid()) {
+    MemoryAllocator::UnprotectChunkFromPage(page);
+    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+  }
+}
+
+#endif
+
+
  void PagedSpace::ClearRSet() {
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
@@ -834,6 +847,24 @@
  }


+#ifdef ENABLE_HEAP_PROTECTION
+
+void NewSpace::Protect() {
+  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
+  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
+}
+
+
+void NewSpace::Unprotect() {
+  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
+                             to_space_.executable());
+  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
+                             from_space_.executable());
+}
+
+#endif
+
+
  void NewSpace::Flip() {
    SemiSpace tmp = from_space_;
    from_space_ = to_space_;
@@ -2240,6 +2271,30 @@
    size_ = 0;
    page_count_ = 0;
  }
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void LargeObjectSpace::Protect() {
+  LargeObjectChunk* chunk = first_chunk_;
+  while (chunk != NULL) {
+    MemoryAllocator::Protect(chunk->address(), chunk->size());
+    chunk = chunk->next();
+  }
+}
+
+
+void LargeObjectSpace::Unprotect() {
+  LargeObjectChunk* chunk = first_chunk_;
+  while (chunk != NULL) {
+    bool is_code = chunk->GetObject()->IsCode();
+    MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
+                               is_code ? EXECUTABLE : NOT_EXECUTABLE);
+    chunk = chunk->next();
+  }
+}
+
+#endif


  Object* LargeObjectSpace::AllocateRawInternal(int requested_size,

Modified: branches/bleeding_edge/src/spaces.h
==============================================================================
--- branches/bleeding_edge/src/spaces.h (original)
+++ branches/bleeding_edge/src/spaces.h Tue Mar 24 05:47:53 2009
@@ -277,16 +277,22 @@
   public:
    Space(AllocationSpace id, Executability executable)
        : id_(id), executable_(executable) {}
+
    virtual ~Space() {}
+
    // Does the space need executable memory?
    Executability executable() { return executable_; }
+
    // Identity used in error reporting.
    AllocationSpace identity() { return id_; }
+
    virtual int Size() = 0;
+
  #ifdef DEBUG
    virtual void Verify() = 0;
    virtual void Print() = 0;
  #endif
+
   private:
    AllocationSpace id_;
    Executability executable_;
@@ -396,6 +402,17 @@
    static Page* FindFirstPageInSameChunk(Page* p);
    static Page* FindLastPageInSameChunk(Page* p);

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect a block of memory by marking it read-only/writable.
+  static inline void Protect(Address start, size_t size);
+  static inline void Unprotect(Address start, size_t size,
+                               Executability executable);
+
+  // Protect/unprotect a chunk given a page in the chunk.
+  static inline void ProtectChunkFromPage(Page* page);
+  static inline void UnprotectChunkFromPage(Page* page);
+#endif
+
  #ifdef DEBUG
    // Reports statistic info of the space.
    static void ReportStatistics();
@@ -460,6 +477,9 @@
    // Returns the chunk id that a page belongs to.
    static inline int GetChunkId(Page* p);

+  // True if the address lies in the initial chunk.
+  static inline bool InInitialChunk(Address address);
+
    // Initializes pages in a chunk. Returns the first page address.
    // This function and GetChunkId() are provided for the mark-compact
    // collector to rebuild page headers in the from space, which is
@@ -669,7 +689,6 @@


  class PagedSpace : public Space {
-  friend class PageIterator;
   public:
    // Creates a space with a maximum capacity, and an id.
    PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
@@ -764,6 +783,12 @@
    // Ensures that the capacity is at least 'capacity'. Returns false on failure.
    bool EnsureCapacity(int capacity);

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  void Protect();
+  void Unprotect();
+#endif
+
  #ifdef DEBUG
    // Print meta info and objects in this space.
    virtual void Print();
@@ -834,6 +859,8 @@
    // Returns the number of total pages in this space.
    int CountTotalPages();
  #endif
+
+  friend class PageIterator;
  };


@@ -1117,6 +1144,12 @@
    bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
    bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  virtual void Protect();
+  virtual void Unprotect();
+#endif
+
  #ifdef DEBUG
    // Verify the active semispace.
    virtual void Verify();
@@ -1554,7 +1587,6 @@


  class LargeObjectSpace : public Space {
-  friend class LargeObjectIterator;
   public:
    explicit LargeObjectSpace(AllocationSpace id);
    virtual ~LargeObjectSpace() {}
@@ -1606,6 +1638,12 @@
    // Checks whether the space is empty.
    bool IsEmpty() { return first_chunk_ == NULL; }

+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  void Protect();
+  void Unprotect();
+#endif
+
  #ifdef DEBUG
    virtual void Verify();
    virtual void Print();
@@ -1634,6 +1672,8 @@
    // Returns the number of extra bytes (rounded up to the nearest full word)
    // required for extra_object_bytes of extra pointers (in bytes).
    static inline int ExtraRSetBytesFor(int extra_object_bytes);
+
+  friend class LargeObjectIterator;

   public:
    TRACK_MEMORY("LargeObjectSpace")

Modified: branches/bleeding_edge/tools/test.py
==============================================================================
--- branches/bleeding_edge/tools/test.py        (original)
+++ branches/bleeding_edge/tools/test.py        Tue Mar 24 05:47:53 2009
@@ -163,7 +163,7 @@
          print failed.output.stdout.strip()
        print "Command: %s" % EscapeCommand(failed.command)
        if failed.HasCrashed():
-        print "--- CRASHED ---"
+        print "--- CRASHED ---"
      if len(self.failed) == 0:
        print "==="
        print "=== All tests succeeded"
@@ -244,7 +244,7 @@
          print self.templates['stderr'] % stderr
        print "Command: %s" % EscapeCommand(output.command)
        if output.HasCrashed():
-        print "--- CRASHED ---"
+        print "--- CRASHED ---"

    def Truncate(self, str, length):
      if length and (len(str) > (length - 3)):
@@ -345,7 +345,7 @@

    def GetSource(self):
      return "(no source available)"
-
+
    def RunCommand(self, command):
      full_command = self.context.processor(command)
      output = Execute(full_command, self.context, self.context.timeout)
@@ -411,7 +411,7 @@
    except ImportError:
      pass
    return prev_error_mode
-
+
  def RunProcess(context, timeout, args, **rest):
    if context.verbose: print "#", " ".join(args)
    popen_args = args
