Revision: 2608
Author: [email protected]
Date: Mon Aug  3 06:17:34 2009
Log: Removed unsafe optimization in RecordWrite.
The optimization was only unsafe if new-space was in the low half of memory
and an object could be located in the top half at an address that only
differs from a new-space address by the high bit.
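
To illustrate the failure mode, here is a minimal standalone C++ sketch (not
V8 code) of the bit-twiddling check that was removed. The mask, new-space
start and object address are made-up illustrative values; kHeapObjectTag is 1,
so a heap pointer carries a set low bit. With new-space in the low half and an
old-space object in the top half whose masked bits match the new-space start,
the combined test reports "in new space" and the remembered-set bit is never
written:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Illustrative assumptions: a 1MB new space at 0x08100000 (low half).
    const uint32_t kNewSpaceMask   = 0xFFF00000u;
    const uint32_t new_space_start = 0x08100000u;
    const uint32_t kSignBit        = 0x80000000u;

    // Old-space object in the top half whose address differs from a
    // new-space address only by the high bit (tagged heap pointer).
    uint32_t object = 0x88100109u;
    uint32_t value  = object;  // storing a heap pointer, not a smi

    uint32_t v = value << 31;           // smi tag bit into the sign bit
    v |= object;                        // combine with the object address
    v &= kNewSpaceMask | kSignBit;      // drop the bits inside the page
    v ^= new_space_start | kSignBit;    // zero iff "looks like new space"
    // less_equal was treated as "no write barrier needed".
    printf("barrier skipped: %s\n",
           static_cast<int32_t>(v) <= 0 ? "yes (the bug)" : "no");
    return 0;
  }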

Review URL: http://codereview.chromium.org/159784

http://code.google.com/p/v8/source/detail?r=2608

Modified:
  /branches/bleeding_edge/src/ia32/assembler-ia32.h
  /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc

=======================================
--- /branches/bleeding_edge/src/ia32/assembler-ia32.h	Thu Jul 16 21:57:17 2009
+++ /branches/bleeding_edge/src/ia32/assembler-ia32.h	Mon Aug  3 06:17:34 2009
@@ -226,7 +226,9 @@
    times_1 = 0,
    times_2 = 1,
    times_4 = 2,
-  times_8 = 3
+  times_8 = 3,
+  times_pointer_size = times_4,
+  times_half_pointer_size = times_2
  };


=======================================
--- /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc	Fri Jul 17 05:12:24 2009
+++ /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc	Mon Aug  3 06:17:34 2009
@@ -146,43 +146,30 @@
    // for the remembered set bits.
    Label done;

-  // This optimization cannot survive serialization and deserialization,
-  // so we disable as long as serialization can take place.
-  int32_t new_space_start =
-      reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
-  if (Serializer::enabled() || new_space_start < 0) {
-    // Cannot do smart bit-twiddling. Need to do two consecutive checks.
-    // Check for Smi first.
-    test(value, Immediate(kSmiTagMask));
-    j(zero, &done);
-    // Test that the object address is not in the new space.  We cannot
-    // set remembered set bits in the new space.
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
      mov(value, Operand(object));
      and_(value, Heap::NewSpaceMask());
      cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
      j(equal, &done);
    } else {
-    // move the value SmiTag into the sign bit
-    shl(value, 31);
-    // combine the object with value SmiTag
-    or_(value, Operand(object));
-    // remove the uninteresting bits inside the page
-    and_(value, Heap::NewSpaceMask() | (1 << 31));
-    // xor has two effects:
-    // - if the value was a smi, then the result will be negative
-    // - if the object is pointing into new space area the page bits will
-    //   all be zero
-    xor_(value, new_space_start | (1 << 31));
-    // Check for both conditions in one branch
-    j(less_equal, &done);
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(value, Operand(object, -new_space_start));
+    and_(value, Heap::NewSpaceMask());
+    j(equal, &done);
    }

    if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
      // Compute the bit offset in the remembered set, leave it in 'value'.
-    mov(value, Operand(object));
+    lea(value, Operand(object, offset));
      and_(value, Page::kPageAlignmentMask);
-    add(Operand(value), Immediate(offset));
-    shr(value, kObjectAlignmentBits);
+    shr(value, kPointerSizeLog2);

      // Compute the page address from the heap object pointer, leave it in
      // 'object'.
@@ -192,7 +179,7 @@
      // to limit code size. We should probably evaluate this decision by
      // measuring the performance of an equivalent implementation using
      // "simpler" instructions
-    bts(Operand(object, 0), value);
+    bts(Operand(object, Page::kRSetOffset), value);
    } else {
      Register dst = scratch;
      if (offset != 0) {
@@ -201,7 +188,9 @@
      // array access: calculate the destination address in the same manner as
      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
      // into an array of words.
-      lea(dst, Operand(object, dst, times_2,
+      ASSERT_EQ(1, kSmiTagSize);
+      ASSERT_EQ(0, kSmiTag);
+      lea(dst, Operand(object, dst, times_half_pointer_size,
                         FixedArray::kHeaderSize - kHeapObjectTag));
      }
      // If we are already generating a shared stub, not inlining the
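
For comparison, the check computed by the new lea/and sequence is equivalent
to the following sketch (again with made-up mask and addresses). Because the
subtraction makes the high bit of the address part of the comparison,
top-half objects can no longer be mistaken for new-space objects:

  #include <cassert>
  #include <cstdint>

  // (object - start) & mask == 0 exactly when object lies in
  // [start, start + new_space_size).
  static bool InNewSpace(uint32_t object, uint32_t start, uint32_t mask) {
    return ((object - start) & mask) == 0;
  }

  int main() {
    const uint32_t kNewSpaceMask  = 0xFFF00000u;  // assumed 1MB new space
    const uint32_t kNewSpaceStart = 0x08100000u;  // assumed start address
    assert(InNewSpace(0x08100109u, kNewSpaceStart, kNewSpaceMask));   // inside
    assert(!InNewSpace(0x88100109u, kNewSpaceStart, kNewSpaceMask));  // high bit differs
    assert(!InNewSpace(0x10200109u, kNewSpaceStart, kNewSpaceMask));  // old space, low half
    return 0;
  }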
