Revision: 8888
Author:   [email protected]
Date:     Thu Aug 11 01:44:32 2011
Log:      ARM: Support record write stubs for incremental marking.

[email protected]
BUG=v8:1456

Review URL: http://codereview.chromium.org/7605026
http://code.google.com/p/v8/source/detail?r=8888

Modified:
 /branches/experimental/gc/src/arm/assembler-arm.h
 /branches/experimental/gc/src/arm/code-stubs-arm.cc
 /branches/experimental/gc/src/arm/code-stubs-arm.h
 /branches/experimental/gc/src/arm/macro-assembler-arm.cc

=======================================
--- /branches/experimental/gc/src/arm/assembler-arm.h	Wed Aug  3 09:10:10 2011
+++ /branches/experimental/gc/src/arm/assembler-arm.h	Thu Aug 11 01:44:32 2011
@@ -1206,6 +1206,10 @@
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }

   // Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
@@ -1259,12 +1263,6 @@
   bool emit_debug_code() const { return emit_debug_code_; }

   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }

   // Decode branch instruction at pos and return branch target pos
   int target_at(int pos);
=======================================
--- /branches/experimental/gc/src/arm/code-stubs-arm.cc	Wed Aug 10 05:50:30 2011
+++ /branches/experimental/gc/src/arm/code-stubs-arm.cc	Thu Aug 11 01:44:32 2011
@@ -6640,11 +6640,133 @@
// we keep the GC informed. The word in the object where the value has been
 // written is in the address register.
 void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+  // forth between a compare instructions (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(
        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
   }
   __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(
+ address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+ // TODO(gc): Create a fast version of this C function that does not duplicate
+  // the checks done in the stub.
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need) {
+  Label on_black;
+
+ // Let's look at the color of the object: If it is not black we don't have
+  // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(
+ address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // TODO(gc): Add call to EnsureNotWhite here.
+
+  // Fall through when we need to inform the incremental marker.
 }


=======================================
--- /branches/experimental/gc/src/arm/code-stubs-arm.h	Wed Aug  3 09:10:10 2011
+++ /branches/experimental/gc/src/arm/code-stubs-arm.h	Thu Aug 11 01:44:32 2011
@@ -409,12 +409,58 @@
     INCREMENTAL_COMPACTION
   };

+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+  }
+
   static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   Assembler::kInstrSize);
+
+    if (Assembler::IsBranch(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+    if (Assembler::IsBranch(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(second_instruction));
+
     return STORE_BUFFER_ONLY;
   }

   static void Patch(Code* stub, Mode mode) {
-    ASSERT(mode == STORE_BUFFER_ONLY);
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
   }

  private:
@@ -429,11 +475,28 @@
         : object_(object),
           address_(address),
           scratch0_(scratch0) {
-    }
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      UNREACHABLE();  // TODO(gc): Save the caller save registers.
+      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
       if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(SSE2);
+        CpuFeatures::Scope scope(VFP3);
         masm->sub(sp,
                   sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
@@ -448,7 +511,7 @@
     inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
                                            SaveFPRegsMode mode) {
       if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(SSE2);
+        CpuFeatures::Scope scope(VFP3);
         // Restore all VFP registers except d0.
         for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
           DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -458,17 +521,19 @@
                   sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
       }
-      UNREACHABLE();  // TODO(gc): Restore the caller save registers.
+      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
     }

     inline Register object() { return object_; }
     inline Register address() { return address_; }
     inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }

    private:
     Register object_;
     Register address_;
     Register scratch0_;
+    Register scratch1_;

     Register GetRegThatIsNotOneOf(Register r1,
                                   Register r2,
@@ -492,11 +557,11 @@
   };

   void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
       OnNoNeedToInformIncrementalMarker on_no_need);
-  void InformIncrementalMarker(MacroAssembler* masm);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);

   Major MajorKey() { return RecordWrite; }

=======================================
--- /branches/experimental/gc/src/arm/macro-assembler-arm.cc	Wed Aug  3 09:10:10 2011
+++ /branches/experimental/gc/src/arm/macro-assembler-arm.cc	Thu Aug 11 01:44:32 2011
@@ -439,10 +439,11 @@
   // of the object, so so offset must be a multiple of kPointerSize.
   ASSERT(IsAligned(offset, kPointerSize));

-  add(dst, object, Operand(offset));
+  add(dst, object, Operand(offset - kHeapObjectTag));
   if (emit_debug_code()) {
     Label ok;
-    JumpIfNotSmi(dst, &ok);
+    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+    b(eq, &ok);
     stop("Unaligned cell in write barrier");
     bind(&ok);
   }

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to