Revision: 20348
Author:   [email protected]
Date:     Mon Mar 31 11:59:29 2014 UTC
Log:      Fix LoadFieldByIndex to take mutable heap-numbers into account.

BUG=
[email protected]

Review URL: https://codereview.chromium.org/213213002
http://code.google.com/p/v8/source/detail?r=20348
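
For-in over a fast-mode object loads each property through HLoadFieldByIndex,
using field indices cached at enumeration time (see the handles.cc hunk below).
Double fields live in mutable heap-number boxes, so returning the raw field
value to optimized code aliases the property storage: a later store to the
field would retroactively change a value that was already loaded, which is
exactly what the new regression test checks. The fix tags each cached index
with a low bit meaning "mutable double". The optimized load tests that bit
and, when it is set, jumps to deferred code that calls the new
Runtime_LoadMutableDouble, which re-boxes the value in a fresh HeapNumber via
the new JSObject::FastPropertyAt handle wrapper. Since the deferred path can
allocate, HLoadFieldByIndex now sets kNewSpacePromotion and the Lithium
instruction gets a pointer map.

The sketch below is standalone C++ with illustrative names (Encode/Decode are
not V8 functions); it models only the index arithmetic that handles.cc and
Runtime_LoadMutableDouble have to agree on:

  #include <cassert>
  #include <cstdio>

  // handles.cc: out-of-object field indices are mapped to negative values,
  // then every index gets a low "is mutable double" tag bit.
  static int Encode(int raw_index, int inobject_properties, bool is_double) {
    int field_index = raw_index;
    if (field_index >= inobject_properties) {
      field_index = -(field_index - inobject_properties + 1);
    }
    return (field_index << 1) | (is_double ? 1 : 0);
  }

  // Runtime_LoadMutableDouble: strip the tag bit (arithmetic shift, so the
  // sign survives) and undo the negative mapping. Right-shifting a negative
  // int is implementation-defined before C++20 but arithmetic in practice.
  static int Decode(int encoded, int inobject_properties) {
    int idx = encoded >> 1;
    if (idx < 0) {
      idx = -idx + inobject_properties - 1;
    }
    return idx;
  }

  int main() {
    const int inobject = 2;  // pretend the map has two in-object properties
    for (int raw = 0; raw < 6; ++raw) {
      for (int d = 0; d < 2; ++d) {
        int enc = Encode(raw, inobject, d != 0);
        assert((enc & 1) == d);                // the bit the deferred check keys on
        assert(Decode(enc, inobject) == raw);  // round trip recovers the field
      }
    }
    std::printf("index round trip ok\n");
    return 0;
  }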

Added:
 /branches/bleeding_edge/test/mjsunit/regress/regress-load-field-by-index.js
Modified:
 /branches/bleeding_edge/src/arm/lithium-arm.cc
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.h
 /branches/bleeding_edge/src/arm64/lithium-arm64.cc
 /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc
 /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h
 /branches/bleeding_edge/src/handles.cc
 /branches/bleeding_edge/src/hydrogen-instructions.h
 /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h
 /branches/bleeding_edge/src/ia32/lithium-ia32.cc
 /branches/bleeding_edge/src/objects.cc
 /branches/bleeding_edge/src/objects.h
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/src/runtime.h
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.h
 /branches/bleeding_edge/src/x64/lithium-x64.cc
 /branches/bleeding_edge/test/mjsunit/fuzz-natives-part1.js

=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-load-field-by-index.js Mon Mar 31 11:59:29 2014 UTC
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {a:1.5, b:{}};
+
+function f(o) {
+  var result = [];
+  for (var k in o) {
+    result[result.length] = o[k];
+  }
+  return result;
+}
+
+f(o);
+f(o);
+%OptimizeFunctionOnNextCall(f);
+var array = f(o);
+o.a = 1.7;
+assertEquals(1.5, array[0]);
=======================================
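
The test pins down the aliasing hazard: without the fix, the optimized f
returns the raw mutable heap-number backing o.a, so the later o.a = 1.7
retroactively changes array[0]. A minimal sketch of the buggy and fixed
behavior (illustrative C++, not V8's object model):

  #include <cassert>

  struct MutableBox { double value; };  // stand-in for a mutable HeapNumber

  // Buggy load: hands out the shared box; later stores alias it.
  static MutableBox* LoadAliasing(MutableBox* field) { return field; }

  // Fixed load (what Runtime_LoadMutableDouble achieves): a fresh box.
  static MutableBox* LoadBoxed(MutableBox* field) {
    return new MutableBox{field->value};
  }

  int main() {
    MutableBox field{1.5};
    MutableBox* aliased = LoadAliasing(&field);
    MutableBox* boxed = LoadBoxed(&field);
    field.value = 1.7;              // o.a = 1.7
    assert(aliased->value == 1.7);  // the bug: an old load sees the store
    assert(boxed->value == 1.5);    // the fix: an old load keeps its value
    delete boxed;
    return 0;
  }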
--- /branches/bleeding_edge/src/arm/lithium-arm.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-arm.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -2539,7 +2539,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }

 } }  // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -5714,15 +5714,63 @@
   __ cmp(map, scratch0());
   DeoptimizeIf(ne, instr->environment());
 }
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ Push(object);
+  __ Push(index);
+  __ mov(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}


 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();

+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
   Label out_of_object, done;
+
+  __ tst(index, Operand(Smi::FromInt(1)));
+  __ b(ne, deferred->entry());
+  __ mov(index, Operand(index, ASR, 1));
+
   __ cmp(index, Operand::Zero());
   __ b(lt, &out_of_object);

@@ -5738,6 +5786,7 @@
   __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }

=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.h      Wed Mar 12 09:59:36 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.h      Mon Mar 31 11:59:29 2014 UTC
@@ -141,6 +141,10 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);

   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-arm64.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-arm64.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -2562,7 +2562,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegisterAtStart(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }


=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -5877,16 +5877,63 @@

   __ Bind(&done);
 }
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ Push(object);
+  __ Push(index);
+  __ Mov(cp, 0);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(x0, result);
+}


 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());

   __ AssertSmi(index);

+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
   Label out_of_object, done;
+
+  __ TestAndBranchIfAnySet(
+      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+  __ Mov(index, Operand(index, ASR, 1));
+
   __ Cmp(index, Smi::FromInt(0));
   __ B(lt, &out_of_object);

@@ -5902,6 +5949,7 @@
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Ldr(result, FieldMemOperand(result,
                                  FixedArray::kHeaderSize - kPointerSize));
+  __ Bind(deferred->exit());
   __ Bind(&done);
 }

=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h      Tue Mar 25 13:45:17 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h      Mon Mar 31 11:59:29 2014 UTC
@@ -149,6 +149,10 @@
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);

   Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

=======================================
--- /branches/bleeding_edge/src/handles.cc      Fri Mar 28 09:49:27 2014 UTC
+++ /branches/bleeding_edge/src/handles.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -682,6 +682,8 @@
             if (field_index >= map->inobject_properties()) {
              field_index = -(field_index - map->inobject_properties() + 1);
             }
+            field_index = (field_index << 1)
+                | details.representation().IsDouble();
             indices->set(index, Smi::FromInt(field_index));
           }
         }
=======================================
--- /branches/bleeding_edge/src/hydrogen-instructions.h      Fri Mar 28 15:25:24 2014 UTC
+++ /branches/bleeding_edge/src/hydrogen-instructions.h      Mon Mar 31 11:59:29 2014 UTC
@@ -7504,6 +7504,7 @@
                     HValue* index) {
     SetOperandAt(0, object);
     SetOperandAt(1, index);
+    SetChangesFlag(kNewSpacePromotion);
     set_representation(Representation::Tagged());
   }

=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -6321,13 +6321,58 @@
          FieldOperand(object, HeapObject::kMapOffset));
   DeoptimizeIf(not_equal, instr->environment());
 }
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ push(object);
+  __ push(index);
+  __ xor_(esi, esi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, eax);
+}


 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index,
+                              const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());

+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, object, index, x87_stack_);
+
   Label out_of_object, done;
+  __ test(index, Immediate(Smi::FromInt(1)));
+  __ j(not_zero, deferred->entry());
+
+  __ sar(index, 1);
+
   __ cmp(index, Immediate(0));
   __ j(less, &out_of_object, Label::kNear);
   __ mov(object, FieldOperand(object,
@@ -6344,6 +6389,7 @@
                               index,
                               times_half_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }

=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h      Tue Mar 11 14:41:22 2014 UTC
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h      Mon Mar 31 11:59:29 2014 UTC
@@ -163,6 +163,9 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);

   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-ia32.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/ia32/lithium-ia32.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -2702,7 +2702,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }


=======================================
--- /branches/bleeding_edge/src/objects.cc      Mon Mar 31 07:48:13 2014 UTC
+++ /branches/bleeding_edge/src/objects.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -5639,6 +5639,15 @@
   CALL_HEAP_FUNCTION(isolate,
                      isolate->heap()->CopyJSObject(*object), JSObject);
 }
+
+
+Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+                                        Representation representation,
+                                        int index) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     object->FastPropertyAt(representation, index), Object);
+}


 template<class ContextObject>
=======================================
--- /branches/bleeding_edge/src/objects.h       Mon Mar 31 07:48:13 2014 UTC
+++ /branches/bleeding_edge/src/objects.h       Mon Mar 31 11:59:29 2014 UTC
@@ -2616,6 +2616,9 @@
   MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
       Representation representation,
       int index);
+  static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+                                       Representation representation,
+                                       int index);
   inline Object* RawFastPropertyAt(int index);
   inline void FastPropertyAtPut(int index, Object* value);

=======================================
--- /branches/bleeding_edge/src/runtime.cc      Fri Mar 28 15:25:24 2014 UTC
+++ /branches/bleeding_edge/src/runtime.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -14506,6 +14506,19 @@
   isolate->heap()->NotifyContextDisposed();
   return isolate->heap()->undefined_value();
 }
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LoadMutableDouble) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
+  int idx = index->value() >> 1;
+  if (idx < 0) {
+    idx = -idx + object->map()->inobject_properties() - 1;
+  }
+  return *JSObject::FastPropertyAt(object, Representation::Double(), idx);
+}


 RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
=======================================
--- /branches/bleeding_edge/src/runtime.h       Fri Mar 28 15:25:24 2014 UTC
+++ /branches/bleeding_edge/src/runtime.h       Mon Mar 31 11:59:29 2014 UTC
@@ -100,6 +100,7 @@
   F(DebugCallbackSupportsStepping, 1, 1) \
   F(DebugPrepareStepInIfStepping, 1, 1) \
   F(FlattenString, 1, 1) \
+  F(LoadMutableDouble, 2, 1) \
   F(TryMigrateInstance, 1, 1) \
   F(NotifyContextDisposed, 0, 1) \
   \
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -5562,13 +5562,57 @@
           FieldOperand(object, HeapObject::kMapOffset));
   DeoptimizeIf(not_equal, instr->environment());
 }
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object);
+  __ Push(index);
+  __ xorp(rsi, rsi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, rax);
+}


 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());

+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
   Label out_of_object, done;
+  __ Move(kScratchRegister, Smi::FromInt(1));
+  __ testq(index, kScratchRegister);
+  __ j(not_zero, deferred->entry());
+
+  __ sar(index, Immediate(1));
+
   __ SmiToInteger32(index, index);
   __ cmpl(index, Immediate(0));
   __ j(less, &out_of_object, Label::kNear);
@@ -5586,6 +5630,7 @@
                                index,
                                times_pointer_size,
                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }

=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.h      Wed Mar 26 15:51:08 2014 UTC
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.h      Mon Mar 31 11:59:29 2014 UTC
@@ -116,6 +116,9 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);

  // Parallel move support.
   void DoParallelMove(LParallelMove* move);
=======================================
--- /branches/bleeding_edge/src/x64/lithium-x64.cc      Mon Mar 31 11:58:53 2014 UTC
+++ /branches/bleeding_edge/src/x64/lithium-x64.cc      Mon Mar 31 11:59:29 2014 UTC
@@ -2589,7 +2589,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }


=======================================
--- /branches/bleeding_edge/test/mjsunit/fuzz-natives-part1.js      Wed Mar 26 15:51:48 2014 UTC
+++ /branches/bleeding_edge/test/mjsunit/fuzz-natives-part1.js      Mon Mar 31 11:59:29 2014 UTC
@@ -182,6 +182,9 @@
   // Only applicable to TypedArrays.
   "_TypedArrayInitialize": true,

+  // Only applicable to loading mutable doubles.
+  "LoadMutableDouble": true,
+
   // Only applicable to generators.
   "_GeneratorNext": true,
   "_GeneratorThrow": true,
