Revision: 8770
Author:   [email protected]
Date:     Mon Aug  1 07:06:30 2011
Log:      Make Win64 compile.

Review URL: http://codereview.chromium.org/7389008
http://code.google.com/p/v8/source/detail?r=8770
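
For context: Win64 uses the LLP64 model, so int and long stay 32-bit
while pointers, size_t, ptrdiff_t and intptr_t are 64-bit. Most hunks
below therefore either widen a variable to intptr_t/size_t or make a
narrowing conversion explicit, silencing MSVC's "possible loss of data"
warnings (C4244/C4267), which warnings-as-errors builds reject. A
minimal sketch of the recurring pattern (illustrative, not code from
the tree):

  #include <stddef.h>

  // On LLP64 Windows a pointer difference is a 64-bit ptrdiff_t, so
  // assigning it to int is a narrowing conversion; the explicit
  // static_cast documents that the value is known to fit.
  int Offset(char* pc, char* start) {
    ptrdiff_t diff = pc - start;    // 64-bit on Win64
    return static_cast<int>(diff);  // explicit, reviewed narrowing
  }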

Modified:
 /branches/experimental/gc/src/heap.h
 /branches/experimental/gc/src/ic.cc
 /branches/experimental/gc/src/incremental-marking.cc
 /branches/experimental/gc/src/isolate.h
 /branches/experimental/gc/src/mark-compact.cc
 /branches/experimental/gc/src/mark-compact.h
 /branches/experimental/gc/src/serialize.cc
 /branches/experimental/gc/src/spaces-inl.h
 /branches/experimental/gc/src/spaces.cc
 /branches/experimental/gc/src/spaces.h
 /branches/experimental/gc/src/store-buffer.h
 /branches/experimental/gc/src/utils.h

=======================================
--- /branches/experimental/gc/src/heap.h        Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/heap.h        Mon Aug  1 07:06:30 2011
@@ -1202,7 +1202,7 @@
            (PromotedSpaceSize() + PromotedExternalMemorySize());
   }

-  inline void LowerOldGenLimits(int bytes) {
+  inline void LowerOldGenLimits(intptr_t bytes) {
     old_gen_promotion_limit_ -= bytes;
     old_gen_allocation_limit_ -= bytes;
   }
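
The old-generation limits being adjusted here are pointer-sized fields,
so the delta must be intptr_t as well; an int parameter would truncate
a 64-bit byte count on Win64. A sketch of the pattern (type and names
illustrative, not the actual V8 declarations):

  #include <stdint.h>

  struct OldGenLimits {
    intptr_t promotion_limit;
    intptr_t allocation_limit;
    // The adjustment is pointer-sized, matching the fields it touches.
    void Lower(intptr_t bytes) {
      promotion_limit -= bytes;
      allocation_limit -= bytes;
    }
  };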
=======================================
--- /branches/experimental/gc/src/ic.cc Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/ic.cc Mon Aug  1 07:06:30 2011
@@ -88,7 +88,8 @@
       // function and the original code.
       JSFunction* function = JSFunction::cast(frame->function());
       function->PrintName();
-      int code_offset = address() - js_code->instruction_start();
+      int code_offset =
+          static_cast<int>(address() - js_code->instruction_start());
       PrintF("+%d", code_offset);
     } else {
       PrintF("<unknown>");
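
address() and instruction_start() are byte pointers, so their
difference is a 64-bit ptrdiff_t on Win64; the cast keeps PrintF's %d
argument an int, and is safe because a code offset is bounded by the
code object's size. Illustrative sketch (hypothetical helper, not from
the tree):

  #include <stdio.h>

  // The offset of a pc within its code object fits in int, so the
  // narrowing from ptrdiff_t cannot lose data in practice.
  void PrintCodeOffset(const unsigned char* pc, const unsigned char* start) {
    int code_offset = static_cast<int>(pc - start);
    printf("+%d\n", code_offset);
  }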
=======================================
--- /branches/experimental/gc/src/incremental-marking.cc Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/incremental-marking.cc Mon Aug  1 07:06:30 2011
@@ -397,7 +397,7 @@

   // Initialize marking stack.
   Address addr = static_cast<Address>(marking_deque_memory_->address());
-  int size = marking_deque_memory_->size();
+  size_t size = marking_deque_memory_->size();
   if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
   marking_deque_.Initialize(addr, addr + size);

@@ -432,11 +432,11 @@
 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
   if (!IsMarking()) return;

-  intptr_t current = marking_deque_.bottom();
-  intptr_t mask = marking_deque_.mask();
-  intptr_t limit = marking_deque_.top();
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
   HeapObject** array = marking_deque_.array();
-  intptr_t new_top = current;
+  int new_top = current;

   Map* filler_map = heap_->one_pointer_filler_map();
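
The deque's backing store size comes from VirtualMemory and is
naturally size_t, while the cursors are small array indices, so they
become int to match the accessor changes in mark-compact.h below. A
sketch of the cursor arithmetic (hypothetical helper, illustrative
only):

  // bottom/top/mask index a power-of-two-sized ring buffer; they fit
  // easily in int, so pointer-width storage buys nothing here.
  inline void DrainRange(int bottom, int top, int mask) {
    for (int current = bottom; current != top;
         current = (current + 1) & mask) {
      // ... process the deque entry at index `current` ...
    }
  }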

=======================================
--- /branches/experimental/gc/src/isolate.h     Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/isolate.h     Mon Aug  1 07:06:30 2011
@@ -772,7 +772,7 @@
     return name##_;                                                     \
   }                                                                     \
   inline void set_##name(type value) {                                  \
-    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
+    ASSERT_EQ(OFFSET_OF(Isolate, name##_), name##_debug_offset_);       \
     name##_ = value;                                                    \
   }
   ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
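
OFFSET_OF and the stored debug offset need not have the same type, and
a bare == inside ASSERT can draw mixed-type comparison warnings on
Win64; ASSERT_EQ presumably avoids that by routing both operands
through a typed helper, which forces a common type. The idea, sketched
(not the actual V8 macro machinery):

  #include <assert.h>
  #include <stdint.h>

  // Funnelling both operands through one signature makes the compiler
  // convert them to a common 64-bit type before comparing, instead of
  // warning about a mixed-width ==.
  inline void CheckEq(int64_t expected, int64_t found) {
    assert(expected == found);
  }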
=======================================
--- /branches/experimental/gc/src/mark-compact.cc       Fri Jul 15 04:09:41 2011
+++ /branches/experimental/gc/src/mark-compact.cc       Mon Aug  1 07:06:30 2011
@@ -2561,7 +2561,7 @@
     for ( ; live_objects != 0; live_objects--) {
       Address free_end = object_address + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
-        space->Free(free_start, free_end - free_start);
+        space->Free(free_start, static_cast<int>(free_end - free_start));
       }
       HeapObject* live_object = HeapObject::FromAddress(free_end);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2572,7 +2572,7 @@
     }
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, p->ObjectAreaEnd() - free_start);
+    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
 }

@@ -3006,10 +3006,10 @@
 // because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
-int MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());

-  int freed_bytes = 0;
+  intptr_t freed_bytes = 0;

   MarkBit::CellType* cells = p->markbits()->cells();

@@ -3031,9 +3031,10 @@
        cell_index++, block_address += 32 * kPointerSize) {
     if (cells[cell_index] != 0) break;
   }
-  int size = block_address - p->ObjectAreaStart();
+  size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += space->Free(p->ObjectAreaStart(), size);
+    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+                                                static_cast<int>(size)));
     return freed_bytes;
   }
   // Grow the size of the start-of-page free space a little to get up to the
@@ -3041,7 +3042,8 @@
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(), size);
+  freed_bytes += space->Free(p->ObjectAreaStart(),
+                             static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3070,7 +3072,8 @@
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start, free_end - free_start);
+          freed_bytes += space->Free(free_start,
+                                     static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3082,7 +3085,8 @@
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start, block_address - free_start);
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
   }

   return freed_bytes;
@@ -3123,7 +3127,7 @@
     for ( ; live_objects != 0; live_objects--) {
       Address free_end = object_address + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
-        space->Free(free_start, free_end - free_start);
+        space->Free(free_start, static_cast<int>(free_end - free_start));
       }
       HeapObject* live_object = HeapObject::FromAddress(free_end);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -3131,7 +3135,7 @@
     }
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, p->ObjectAreaEnd() - free_start);
+    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
 }

@@ -3145,8 +3149,8 @@

   PageIterator it(space);

-  int freed_bytes = 0;
-  int newspace_size = space->heap()->new_space()->Size();
+  intptr_t freed_bytes = 0;
+  intptr_t newspace_size = space->heap()->new_space()->Size();

   while (it.has_next()) {
     Page* p = it.next();
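
Each individual Free() call here covers at most one page, so an int
byte count suffices for it; it is the per-space totals (freed_bytes,
newspace_size) that can exceed 2GB on a 64-bit heap and so widen to
intptr_t. The accumulation pattern, sketched (hypothetical helper):

  #include <stdint.h>

  // Per-region byte counts fit in int; only the running total needs to
  // be pointer-sized. int widens to intptr_t without loss.
  inline intptr_t TotalFreed(const int* region_bytes, int count) {
    intptr_t freed_bytes = 0;
    for (int i = 0; i < count; i++) {
      freed_bytes += region_bytes[i];
    }
    return freed_bytes;
  }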
=======================================
--- /branches/experimental/gc/src/mark-compact.h        Fri Jul 15 04:09:41 2011
+++ /branches/experimental/gc/src/mark-compact.h        Mon Aug  1 07:06:30 2011
@@ -188,7 +188,7 @@
     HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
     HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
     array_ = obj_low;
-    mask_ = RoundDownToPowerOf2(obj_high - obj_low) - 1;
+    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
     top_ = bottom_ = 0;
     overflowed_ = false;
   }
@@ -248,10 +248,10 @@
   }

   HeapObject** array() { return array_; }
-  intptr_t bottom() { return bottom_; }
-  intptr_t top() { return top_; }
-  intptr_t mask() { return mask_; }
-  void set_top(intptr_t top) { top_ = top; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }

  private:
   HeapObject** array_;
@@ -301,7 +301,8 @@

   static int SizeOfChain(SlotsBuffer* buffer) {
     if (buffer == NULL) return 0;
-    return buffer->idx_ + (buffer->chain_length_ - 1) * kNumberOfElements;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
   }

   inline bool IsFull() {
@@ -441,7 +442,7 @@

   // Sweep a single page from the given space conservatively.
   // Return a number of reclaimed bytes.
-  static int SweepConservatively(PagedSpace* space, Page* p);
+  static intptr_t SweepConservatively(PagedSpace* space, Page* p);

   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))->
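
obj_high - obj_low is a ptrdiff_t element count, but the marking
deque's capacity easily fits in int, so the narrowing before
RoundDownToPowerOf2 is safe and the cursors shrink to int to match.
Sketched (hypothetical helper, assuming a capacity well below 2^31):

  // Computes the index mask for a power-of-two ring buffer laid over
  // the element range [low, high).
  inline int MaskFor(void** low, void** high) {
    int entries = static_cast<int>(high - low);  // known to fit in int
    int pow2 = 1;
    while (pow2 * 2 <= entries) pow2 *= 2;       // round down to power of 2
    return pow2 - 1;
  }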
=======================================
--- /branches/experimental/gc/src/serialize.cc  Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/serialize.cc  Mon Aug  1 07:06:30 2011
@@ -631,6 +631,7 @@
       maybe_new_allocation =
           reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
     }
+    ASSERT(!maybe_new_allocation->IsFailure());
     Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
     HeapObject* new_object = HeapObject::cast(new_allocation);
     address = new_object->address();
@@ -691,6 +692,7 @@

 void Deserializer::Deserialize() {
   isolate_ = Isolate::Current();
+  ASSERT(isolate_ != NULL);
   // Don't GC while deserializing - just expand the heap.
   Address* store_buffer_top =
       reinterpret_cast<Address*>(isolate_->heap()->store_buffer_top());
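
These two ASSERTs are debug-only documentation rather than Win64 type
fixes: the deserializer pre-expands the heap, so the raw allocation is
expected to succeed, and the current Isolate must already exist. In
release builds they compile away. The shape of such a check, sketched
(hypothetical helper):

  #include <assert.h>
  #include <stddef.h>

  // Debug-only invariant: the caller guarantees the allocation cannot
  // fail, so a failure here is a serializer bug, not an OOM to handle.
  inline void* CheckedAllocation(void* result) {
    assert(result != NULL);  // stands in for !IsFailure()
    return result;
  }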
=======================================
--- /branches/experimental/gc/src/spaces-inl.h  Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/spaces-inl.h  Mon Aug  1 07:06:30 2011
@@ -168,7 +168,8 @@
   ASSERT(chunk->owner() == owner);
   owner->IncreaseCapacity(Page::kObjectAreaSize);
   owner->Free(page->ObjectAreaStart(),
-              page->ObjectAreaEnd() - page->ObjectAreaStart());
+              static_cast<int>(page->ObjectAreaEnd() -
+                               page->ObjectAreaStart()));

   heap->incremental_marking()->SetOldSpacePageFlags(chunk);

@@ -301,13 +302,13 @@
       allocation_info_.limit = Min(
           allocation_info_.limit + inline_allocation_limit_step_,
           high);
-      int bytes_allocated = new_top - top_on_previous_step_;
+      int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
       heap()->incremental_marking()->Step(bytes_allocated);
       top_on_previous_step_ = new_top;
       return AllocateRawInternal(size_in_bytes);
     } else if (AddFreshPage()) {
       // Switched to new page. Try allocating again.
-      int bytes_allocated = old_top - top_on_previous_step_;
+      int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
       heap()->incremental_marking()->Step(bytes_allocated);
       top_on_previous_step_ = to_space_.page_low();
       return AllocateRawInternal(size_in_bytes);
=======================================
--- /branches/experimental/gc/src/spaces.cc     Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/spaces.cc     Mon Aug  1 07:06:30 2011
@@ -160,7 +160,7 @@
   Address aligned_base =
       RoundUp(reinterpret_cast<Address>(code_range_->address()),
               MemoryChunk::kAlignment);
-  int size = code_range_->size() - (aligned_base - base);
+  size_t size = code_range_->size() - (aligned_base - base);
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
   return true;
@@ -320,7 +320,8 @@
   ASSERT(IsAligned(alignment, OS::AllocateAlignment()));
   if (size_ + requested > capacity_) return NULL;

-  size_t allocated = RoundUp(requested + alignment, OS::AllocateAlignment());
+  size_t allocated = RoundUp(requested + alignment,
+                             static_cast<intptr_t>(OS::AllocateAlignment()));

   Address base = reinterpret_cast<Address>(
       VirtualMemory::ReserveRegion(allocated));
@@ -515,7 +516,8 @@
 #ifdef DEBUG
   ZapBlock(base, chunk_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(chunk_size);
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(chunk_size));

   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
@@ -1839,8 +1841,7 @@
   int bytes_left = new_node_size - size_in_bytes;
   ASSERT(bytes_left >= 0);

-  int old_linear_size = owner_->limit() - owner_->top();
-
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
@@ -1966,7 +1967,7 @@
   // on the first allocation after the sweep.
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
-  int old_linear_size = limit() - top();
+  int old_linear_size = static_cast<int>(limit() - top());
   Free(top(), old_linear_size);
   SetTop(NULL, NULL);

@@ -1996,7 +1997,7 @@
   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
   if (new_area == NULL) return false;

-  int old_linear_size = limit() - top();
+  int old_linear_size = static_cast<int>(limit() - top());
   // Mark the old linear allocation area with a free space so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
@@ -2018,7 +2019,7 @@
 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
   if (IsSweepingComplete()) return true;

-  int freed_bytes = 0;
+  intptr_t freed_bytes = 0;
   Page* last = last_unswept_page_->next_page();
   Page* p = first_unswept_page_;
   do {
@@ -2483,8 +2484,7 @@

   bool owned = (chunk->owner() == this);

-  SLOW_ASSERT(!owned
-              || !FindObject(address)->IsFailure());
+  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

   return owned;
 }
=======================================
--- /branches/experimental/gc/src/spaces.h      Fri Jul  8 08:50:03 2011
+++ /branches/experimental/gc/src/spaces.h      Mon Aug  1 07:06:30 2011
@@ -352,7 +352,7 @@

   Address body_limit() { return address() + size(); }

-  int body_size() { return size() - kObjectStartOffset; }
+  int body_size() { return static_cast<int>(size() - kObjectStartOffset); }

   bool Contains(Address addr) {
     return addr >= body() && addr < address() + size();
=======================================
--- /branches/experimental/gc/src/store-buffer.h        Fri Jul  1 04:28:38 2011
+++ /branches/experimental/gc/src/store-buffer.h        Mon Aug  1 07:06:30 2011
@@ -84,7 +84,7 @@
   static const int kStoreBufferOverflowBit = 1 << 16;
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldStoreBufferLength = kStoreBufferLength * 64;
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
   static const int kHashMapLengthLog2 = 12;
   static const int kHashMapLength = 1 << kHashMapLengthLog2;
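
Unlike the other hunks, this one changes a size rather than a type: the
old-pointer store buffer shrinks from 64x to 16x the store buffer
length. Working the arithmetic, assuming 8-byte pointers on a 64-bit
target (a standalone sketch, not code from the tree):

  #include <stdio.h>

  int main() {
    const int kStoreBufferSize = 1 << 16;                       // 65536 bytes
    const int kStoreBufferLength = kStoreBufferSize / 8;        // 8192 slots
    const int kOldStoreBufferLength = kStoreBufferLength * 16;  // 131072
    // 131072 slots * 8 bytes = 1MB of addresses, down from 4MB at 64x.
    printf("%d slots, %d bytes\n",
           kOldStoreBufferLength, kOldStoreBufferLength * 8);
    return 0;
  }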

=======================================
--- /branches/experimental/gc/src/utils.h       Tue Jul 12 16:04:25 2011
+++ /branches/experimental/gc/src/utils.h       Mon Aug  1 07:06:30 2011
@@ -112,7 +112,7 @@

 // Return the largest multiple of m which is <= x.
 template <typename T>
-static inline T RoundDown(T x, int m) {
+static inline T RoundDown(T x, intptr_t m) {
   ASSERT(IsPowerOf2(m));
   return AddressFrom<T>(OffsetFrom(x) & -m);
 }
@@ -120,8 +120,8 @@

 // Return the smallest multiple of m which is >= x.
 template <typename T>
-static inline T RoundUp(T x, int m) {
-  return RoundDown(x + m - 1, m);
+static inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
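
Widening m to intptr_t makes the mask -m pointer-width by construction
rather than relying on sign extension from int, and keeps mixed-width
conversion warnings quiet when T is a 64-bit type (as with the aligned
allocation size in spaces.cc above). The bit trick, sketched with plain
integers (hypothetical helpers):

  #include <stdint.h>

  // For a power-of-two m, -m is an all-ones mask above log2(m) zeros,
  // so x & -m clears the low bits of x, rounding it down to m.
  inline intptr_t RoundDownTo(intptr_t x, intptr_t m) {
    return x & -m;
  }

  inline intptr_t RoundUpTo(intptr_t x, intptr_t m) {
    return RoundDownTo(x + m - 1, m);
  }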

