Reviewers: Erik Corry,

Description:
Fix leakage of virtual address space on Linux platform.

Ensure that unmap return values are checked in debug mode.

[email protected]
BUG=v8:1701


Please review this at http://codereview.chromium.org/8060052/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files:
  M src/platform-linux.cc
  M src/platform-macos.cc
  M src/platform-openbsd.cc
  M src/platform-win32.cc
  M src/platform.h
  M src/spaces.cc
  M test/mjsunit/mjsunit.status


Index: src/platform-linux.cc
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 2d6160e473f12c89f67693e44759622b9a538bc6..855ebf78e16fe14beba51d616c84a83bd03df9f3 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -477,7 +477,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,


 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }

@@ -559,7 +559,7 @@ void OS::SignalCodeMovingGC() {
   void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                     fileno(f), 0);
   ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
+  OS::Free(addr, size);
   fclose(f);
 }

@@ -621,21 +621,31 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
                            kMmapFd,
                            kMmapFdOffset);
   if (reservation == MAP_FAILED) return;
+
   Address base = static_cast<Address>(reservation);
   Address aligned_base = RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  ASSERT_LE(base, aligned_base);

   // Unmap extra memory reserved before and after the desired block.
-  size_t bytes_prior = static_cast<size_t>(aligned_base - base);
-  if (bytes_prior > 0) {
-    munmap(base, bytes_prior);
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
   }
-  if (static_cast<size_t>(aligned_base - base) < request_size - size) {
-    munmap(aligned_base + size, request_size - size - bytes_prior);
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
   }

+  ASSERT(aligned_size == request_size);
+
   address_ = static_cast<void*>(aligned_base);
-  size_ = size;
+  size_ = aligned_size;
 }


Index: src/platform-macos.cc
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0ac8fbe2d74771226ed470974fe70d166f3d35e8..a70b43c0276b2bc3425f3782789d83e814828860 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -228,7 +228,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,


 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }

@@ -353,21 +353,31 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
                            kMmapFd,
                            kMmapFdOffset);
   if (reservation == MAP_FAILED) return;
+
   Address base = static_cast<Address>(reservation);
   Address aligned_base = RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  ASSERT_LE(base, aligned_base);

   // Unmap extra memory reserved before and after the desired block.
-  size_t bytes_prior = static_cast<size_t>(aligned_base - base);
-  if (bytes_prior > 0) {
-    munmap(base, bytes_prior);
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
   }
-  if (static_cast<size_t>(aligned_base - base) < request_size - size) {
-    munmap(aligned_base + size, request_size - size - bytes_prior);
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
   }

+  ASSERT(aligned_size == request_size);
+
   address_ = static_cast<void*>(aligned_base);
-  size_ = size;
+  size_ = aligned_size;
 }


Index: src/platform-openbsd.cc
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 973329b9b109b3b145444e6a3a9dc650ba659d1b..3151d18053c0cb6b3442d72aeccde5f41e7ea227 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -245,7 +245,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,


 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }

@@ -342,7 +342,8 @@ VirtualMemory::VirtualMemory(size_t size) {

 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    OS::Free(address(), size());
+    address_ = MAP_FAILED;
   }
 }

Index: src/platform-win32.cc
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 46af4dcdcd22b126fbaaa528bffbee655a0299ea..8771c43679376cef6cf58d936f80e8e8c9471aeb 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1413,7 +1413,9 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
   if (address == NULL) return;
   Address base = RoundUp(static_cast<Address>(address), alignment);
   // Try reducing the size by freeing and then reallocating a specific area.
-  ReleaseRegion(address, request_size);
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  ASSERT(result);
   address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
   if (address != NULL) {
     request_size = size;
Index: src/platform.h
diff --git a/src/platform.h b/src/platform.h
index 85901240f3d0740e86d1f6569710906e8754da1c..99deb1bef02394d97eaa2f687f10b16dbca8992b 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -351,12 +351,14 @@ class VirtualMemory {

   void Release() {
     ASSERT(IsReserved());
-    // Notice: Order is somportant here. The VirtualMemory object might live
+    // Notice: Order is important here. The VirtualMemory object might live
     // inside the allocated region.
     void* address = address_;
     size_t size = size_;
     Reset();
-    ReleaseRegion(address, size);
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
   }

   // Assign control of the reserved region to a different VirtualMemory object.
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 9ae2e625d042076a279512f7b3223d2b884df1e3..0dc417d79149456f8eefe203d3df1eb460ceb6e7 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -341,7 +341,9 @@ void MemoryAllocator::FreeMemory(Address base,
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
     ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
-    VirtualMemory::ReleaseRegion(base, size);
+    bool result = VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    ASSERT(result);
   }
 }

Index: test/mjsunit/mjsunit.status
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 966fe0a7e7cb2f236e6dc3e63f500812ba8981c2..941e0e8cc5be94580c002afbb6dda988fd32d0ad 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -35,8 +35,6 @@ bugs: FAIL
 regress/regress-1119: FAIL

##############################################################################
-# NewGC: http://code.google.com/p/v8/issues/detail?id=1701
-array-join: SKIP

 # NewGC: BUG(1719) slow to collect arrays over several contexts.
 regress/regress-524: SKIP


--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to