Revision: 21848
Author:   [email protected]
Date:     Mon Jun 16 00:04:47 2014 UTC
Log:      Version 3.27.29 (based on bleeding_edge revision r21835)

Emulate MLS on pre-ARMv6T2. Cleaned up ThumbEE vs. Thumb2 confusion.

X87: Fixed flooring division by a power of 2, once again (issue 3259).

Fixed undefined behavior in RNG (Chromium issue 377790).

Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=21848

Added:
 /trunk/test/mjsunit/regress/regress-gvn-ftt.js
Modified:
 /trunk/ChangeLog
 /trunk/src/arm/assembler-arm.cc
 /trunk/src/arm/lithium-codegen-arm.cc
 /trunk/src/arm/macro-assembler-arm.cc
 /trunk/src/arm/macro-assembler-arm.h
 /trunk/src/cpu.cc
 /trunk/src/cpu.h
 /trunk/src/flag-definitions.h
 /trunk/src/globals.h
 /trunk/src/hydrogen-gvn.cc
 /trunk/src/hydrogen-instructions.h
 /trunk/src/ic.cc
 /trunk/src/log.cc
 /trunk/src/log.h
 /trunk/src/mips/lithium-codegen-mips.cc
 /trunk/src/objects.cc
 /trunk/src/objects.h
 /trunk/src/runtime.cc
 /trunk/src/utils/random-number-generator.cc
 /trunk/src/version.cc
 /trunk/src/x64/lithium-codegen-x64.cc
 /trunk/src/x64/lithium-codegen-x64.h
 /trunk/src/x64/lithium-x64.cc
 /trunk/src/x64/lithium-x64.h
 /trunk/src/x87/lithium-codegen-x87.cc
 /trunk/test/mjsunit/fast-non-keyed.js
 /trunk/test/mjsunit/mjsunit.status
 /trunk/tools/fuzz-harness.sh
 /trunk/tools/profviz/composer.js

=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/regress-gvn-ftt.js Mon Jun 16 00:04:47 2014 UTC
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --track-field-types --use-gvn
+
+function A(id) {
+  this.id = id;
+}
+
+var a1 = new A(1);
+var a2 = new A(2);
+
+
+var g;
+function f(o, value) {
+  g = o.o;
+  o.o = value;
+  return o.o;
+}
+
+var obj = {o: a1};
+
+f(obj, a1);
+f(obj, a1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(a2.id, f(obj, a2).id);
=======================================
--- /trunk/ChangeLog    Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/ChangeLog    Mon Jun 16 00:04:47 2014 UTC
@@ -1,3 +1,14 @@
+2014-06-16: Version 3.27.29
+
+        Emulate MLS on pre-ARMv6T2. Cleaned up ThumbEE vs. Thumb2 confusion.
+
+        X87: Fixed flooring division by a power of 2, once again (issue 3259).
+
+        Fixed undefined behavior in RNG (Chromium issue 377790).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-06-13: Version 3.27.28

         Add v8::Promise::Then (Chromium issue 371288).
=======================================
--- /trunk/src/arm/assembler-arm.cc     Thu Jun 12 00:04:49 2014 UTC
+++ /trunk/src/arm/assembler-arm.cc     Mon Jun 16 00:04:47 2014 UTC
@@ -84,10 +84,11 @@
     supported_ |= 1u << ARMv7;
     if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
     if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
-    if (FLAG_enable_sudiv)  supported_ |= 1u << SUDIV;
+    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
     if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
     if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
   }
+  if (FLAG_enable_mls) supported_ |= 1u << MLS;
   if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;

 #else  // __arm__
@@ -102,6 +103,7 @@

   if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
   if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
+  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;

   if (cpu.architecture() >= 7) {
     if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
@@ -744,7 +746,7 @@
 // same position.


-int Assembler::target_at(int pos)  {
+int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
   if (is_uint24(instr)) {
     // Emitted link to a label, not part of a branch.
@@ -1481,6 +1483,7 @@
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                     Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  ASSERT(IsEnabled(MLS));
   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
=======================================
--- /trunk/src/arm/lithium-codegen-arm.cc       Thu Jun 12 00:04:49 2014 UTC
+++ /trunk/src/arm/lithium-codegen-arm.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -1215,7 +1215,7 @@
     //   mls r3, r3, r2, r1

     __ sdiv(result_reg, left_reg, right_reg);
-    __ mls(result_reg, result_reg, right_reg, left_reg);
+    __ Mls(result_reg, result_reg, right_reg, left_reg);

     // If we care about -0, test if the dividend is <0 and the result is 0.
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1413,7 +1413,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Compute remainder and deopt if it's not zero.
     Register remainder = scratch0();
-    __ mls(remainder, result, divisor, dividend);
+    __ Mls(remainder, result, divisor, dividend);
     __ cmp(remainder, Operand::Zero());
     DeoptimizeIf(ne, instr->environment());
   }
@@ -1588,7 +1588,7 @@

   Label done;
   Register remainder = scratch0();
-  __ mls(remainder, result, right, left);
+  __ Mls(remainder, result, right, left);
   __ cmp(remainder, Operand::Zero());
   __ b(eq, &done);
   __ eor(remainder, remainder, Operand(right));
=======================================
--- /trunk/src/arm/macro-assembler-arm.cc       Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -252,6 +252,19 @@
     vmov(dst, src);
   }
 }
+
+
+void MacroAssembler::Mls(Register dst, Register src1, Register src2,
+                         Register srcA, Condition cond) {
+  if (CpuFeatures::IsSupported(MLS)) {
+    CpuFeatureScope scope(this, MLS);
+    mls(dst, src1, src2, srcA, cond);
+  } else {
+    ASSERT(!dst.is(srcA));
+    mul(ip, src1, src2, LeaveCC, cond);
+    sub(dst, srcA, ip, LeaveCC, cond);
+  }
+}
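
A note on the Mls() helper just added: both paths compute dst = srcA - src1 *
src2, which is why the fallback asserts !dst.is(srcA) (srcA is still needed
after the intermediate multiply into ip). A minimal reference sketch of the
semantics in plain C++ (illustrative only, not the macro assembler's API; the
unsigned casts mirror the instruction's modulo-2^32 arithmetic):

  #include <cstdint>

  // What both the real MLS instruction and the mul + sub fallback produce.
  int32_t MlsReference(int32_t src1, int32_t src2, int32_t srcA) {
    return static_cast<int32_t>(static_cast<uint32_t>(srcA) -
                                static_cast<uint32_t>(src1) *
                                static_cast<uint32_t>(src2));
  }

This is what lets lithium-codegen-arm.cc compute a remainder as
Mls(remainder, quotient, divisor, dividend) after sdiv.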


 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
=======================================
--- /trunk/src/arm/macro-assembler-arm.h        Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.h        Mon Jun 16 00:04:47 2014 UTC
@@ -117,7 +117,8 @@
             Register scratch = no_reg,
             Condition cond = al);

-
+  void Mls(Register dst, Register src1, Register src2, Register srcA,
+           Condition cond = al);
   void And(Register dst, Register src1, const Operand& src2,
            Condition cond = al);
   void Ubfx(Register dst, Register src, int lsb, int width,
=======================================
--- /trunk/src/cpu.cc   Thu Jun 12 08:25:21 2014 UTC
+++ /trunk/src/cpu.cc   Mon Jun 16 00:04:47 2014 UTC
@@ -259,7 +259,7 @@
              has_sse42_(false),
              has_idiva_(false),
              has_neon_(false),
-             has_thumbee_(false),
+             has_thumb2_(false),
              has_vfp_(false),
              has_vfp3_(false),
              has_vfp3_d32_(false) {
@@ -383,7 +383,6 @@
   if (hwcaps != 0) {
     has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
     has_neon_ = (hwcaps & HWCAP_NEON) != 0;
-    has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
     has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
     has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
     has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
@@ -393,7 +392,7 @@
     char* features = cpu_info.ExtractField("Features");
     has_idiva_ = HasListItem(features, "idiva");
     has_neon_ = HasListItem(features, "neon");
-    has_thumbee_ = HasListItem(features, "thumbee");
+    has_thumb2_ = HasListItem(features, "thumb2");
     has_vfp_ = HasListItem(features, "vfp");
     if (HasListItem(features, "vfpv3d16")) {
       has_vfp3_ = true;
@@ -417,13 +416,13 @@
     architecture_ = 7;
   }

-  // ARMv7 implies ThumbEE.
+  // ARMv7 implies Thumb2.
   if (architecture_ >= 7) {
-    has_thumbee_ = true;
+    has_thumb2_ = true;
   }

-  // The earliest architecture with ThumbEE is ARMv6T2.
-  if (has_thumbee_ && architecture_ < 6) {
+  // The earliest architecture with Thumb2 is ARMv6T2.
+  if (has_thumb2_ && architecture_ < 6) {
     architecture_ = 6;
   }

@@ -435,10 +434,10 @@
   uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
   if (cpu_flags & ARM_CPU_FLAG_V7) {
     architecture_ = 7;
-    has_thumbee_ = true;
+    has_thumb2_ = true;
   } else if (cpu_flags & ARM_CPU_FLAG_V6) {
     architecture_ = 6;
-    // QNX doesn't say if ThumbEE is available.
+    // QNX doesn't say if Thumb2 is available.
     // Assume false for the architectures older than ARMv7.
   }
   ASSERT(architecture_ >= 6);
=======================================
--- /trunk/src/cpu.h    Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/cpu.h    Mon Jun 16 00:04:47 2014 UTC
@@ -72,7 +72,7 @@
   // arm features
   bool has_idiva() const { return has_idiva_; }
   bool has_neon() const { return has_neon_; }
-  bool has_thumbee() const { return has_thumbee_; }
+  bool has_thumb2() const { return has_thumb2_; }
   bool has_vfp() const { return has_vfp_; }
   bool has_vfp3() const { return has_vfp3_; }
   bool has_vfp3_d32() const { return has_vfp3_d32_; }
@@ -103,7 +103,7 @@
   bool has_sse42_;
   bool has_idiva_;
   bool has_neon_;
-  bool has_thumbee_;
+  bool has_thumb2_;
   bool has_vfp_;
   bool has_vfp3_;
   bool has_vfp3_d32_;
=======================================
--- /trunk/src/flag-definitions.h       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/flag-definitions.h       Mon Jun 16 00:04:47 2014 UTC
@@ -373,6 +373,8 @@
             "enable use of NEON instructions if available (ARM only)")
 DEFINE_bool(enable_sudiv, true,
             "enable use of SDIV and UDIV instructions if available (ARM only)")
+DEFINE_bool(enable_mls, true,
+            "enable use of MLS instructions if available (ARM only)")
 DEFINE_bool(enable_movw_movt, false,
             "enable loading 32-bit constant by means of movw/movt "
             "instruction pairs (ARM only)")
=======================================
--- /trunk/src/globals.h        Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/globals.h        Mon Jun 16 00:04:47 2014 UTC
@@ -616,6 +616,7 @@
     VFP3,
     ARMv7,
     SUDIV,
+    MLS,
     UNALIGNED_ACCESSES,
     MOVW_MOVT_IMMEDIATE_LOADS,
     VFP32DREGS,
=======================================
--- /trunk/src/hydrogen-gvn.cc  Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/hydrogen-gvn.cc  Mon Jun 16 00:04:47 2014 UTC
@@ -466,7 +466,7 @@
 bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
                                               int* index) {
   for (int i = 0; i < num_inobject_fields_; ++i) {
-    if (access.Equals(inobject_fields_[i])) {
+    if (access.SameField(inobject_fields_[i])) {
       *index = i;
       return true;
     }
=======================================
--- /trunk/src/hydrogen-instructions.h  Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/hydrogen-instructions.h  Mon Jun 16 00:04:47 2014 UTC
@@ -6202,7 +6202,14 @@
   void PrintTo(StringStream* stream) const;

   inline bool Equals(HObjectAccess that) const {
-    return value_ == that.value_;  // portion and offset must match
+    return value_ == that.value_;
+  }
+
+  // Returns true if |this| access refers to the same field as |that|, which
+  // means that both have the same |offset| and |portion| values.
+  inline bool SameField(HObjectAccess that) const {
+    uint32_t mask = PortionField::kMask | OffsetField::kMask;
+    return (value_ & mask) == (that.value_ & mask);
   }

  protected:
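
A note on Equals() vs. SameField(): HObjectAccess packs several bit fields
into value_; Equals() compares all of them, while SameField() masks out
everything except the portion and offset, so two accesses to the same field
still match for GVN even when other encoded bits differ. A rough standalone
sketch (the bit layout below is invented purely for illustration):

  #include <cstdint>

  struct AccessSketch {
    static constexpr uint32_t kPortionMask        = 0x00000007u;  // hypothetical
    static constexpr uint32_t kRepresentationMask = 0x000000F8u;  // hypothetical
    static constexpr uint32_t kOffsetMask         = 0xFFFFFF00u;  // hypothetical
    uint32_t value_;
    bool Equals(AccessSketch that) const { return value_ == that.value_; }
    bool SameField(AccessSketch that) const {
      uint32_t mask = kPortionMask | kOffsetMask;  // ignore representation
      return (value_ & mask) == (that.value_ & mask);
    }
  };

The GVN side-effect tracker above now groups accesses by field rather than by
the full access descriptor, which is what regress-gvn-ftt.js exercises.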
=======================================
--- /trunk/src/ic.cc    Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/ic.cc    Mon Jun 16 00:04:47 2014 UTC
@@ -1326,8 +1326,7 @@
       TRACE_IC("StoreIC", name);
     } else if (can_store) {
       UpdateCaches(&lookup, receiver, name, value);
-    } else if (!name->IsCacheable(isolate()) ||
-               lookup.IsNormal() ||
+    } else if (lookup.IsNormal() ||
                (lookup.IsField() && lookup.CanHoldValue(value))) {
       Handle<Code> stub = generic_stub();
       set_target(*stub);
=======================================
--- /trunk/src/log.cc   Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/log.cc   Mon Jun 16 00:04:47 2014 UTC
@@ -1074,6 +1074,16 @@
   msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
   msg.WriteToLogFile();
 }
+
+
+void Logger::CurrentTimeEvent() {
+  if (!log_->IsEnabled()) return;
+  ASSERT(FLAG_log_internal_timer_events);
+  Log::MessageBuilder msg(log_);
+  int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
+  msg.Append("current-time,%ld\n", since_epoch);
+  msg.WriteToLogFile();
+}


 void Logger::TimerEvent(StartEnd se, const char* name) {
=======================================
--- /trunk/src/log.h    Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/log.h    Mon Jun 16 00:04:47 2014 UTC
@@ -290,6 +290,7 @@
   enum StartEnd { START, END };

   void CodeDeoptEvent(Code* code);
+  void CurrentTimeEvent();

   void TimerEvent(StartEnd se, const char* name);

=======================================
--- /trunk/src/mips/lithium-codegen-mips.cc     Thu Jun 12 00:04:49 2014 UTC
+++ /trunk/src/mips/lithium-codegen-mips.cc     Mon Jun 16 00:04:47 2014 UTC
@@ -1327,21 +1327,21 @@
   }

   // If the divisor is negative, we have to negate and handle edge cases.
-  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-    // divident can be the same register as result so save the value of it
-    // for checking overflow.
-    __ Move(scratch, dividend);
-  }
+
+  // dividend can be the same register as result so save the value of it
+  // for checking overflow.
+  __ Move(scratch, dividend);
+
   __ Subu(result, zero_reg, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
   }

   // Dividing by -1 is basically negation, unless we overflow.
-  __ Xor(at, scratch, result);
+  __ Xor(scratch, scratch, result);
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg));
+      DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
     }
     return;
   }
@@ -1353,7 +1353,7 @@
   }

   Label no_overflow, done;
-  __ Branch(&no_overflow, lt, at, Operand(zero_reg));
+  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
   __ li(result, Operand(kMinInt / divisor));
   __ Branch(&done);
   __ bind(&no_overflow);
=======================================
--- /trunk/src/objects.cc       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/objects.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -1802,11 +1802,7 @@
     return MaybeHandle<Map>();
   }

-  // Normalize the object if the name is an actual name (not the
-  // hidden strings) and is not a real identifier.
-  // Normalize the object if it will have too many fast properties.
   Isolate* isolate = map->GetIsolate();
-  if (!name->IsCacheable(isolate)) return MaybeHandle<Map>();

   // Compute the new index for new field.
   int index = map->NextFreePropertyIndex();
@@ -8403,30 +8399,6 @@
 #endif


-static bool IsIdentifier(UnicodeCache* cache, Name* name) {
-  // Checks whether the buffer contains an identifier (no escape).
-  if (!name->IsString()) return false;
-  String* string = String::cast(name);
-  if (string->length() == 0) return true;
-  ConsStringIteratorOp op;
-  StringCharacterStream stream(string, &op);
-  if (!cache->IsIdentifierStart(stream.GetNext())) {
-    return false;
-  }
-  while (stream.HasMore()) {
-    if (!cache->IsIdentifierPart(stream.GetNext())) {
-      return false;
-    }
-  }
-  return true;
-}
-
-
-bool Name::IsCacheable(Isolate* isolate) {
-  return IsSymbol() || IsIdentifier(isolate->unicode_cache(), this);
-}
-
-
 bool String::LooksValid() {
   if (!GetIsolate()->heap()->Contains(this)) return false;
   return true;
=======================================
--- /trunk/src/objects.h        Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/objects.h        Mon Jun 16 00:04:47 2014 UTC
@@ -8928,8 +8928,6 @@
   // Casting.
   static inline Name* cast(Object* obj);

-  bool IsCacheable(Isolate* isolate);
-
   DECLARE_PRINTER(Name)

   // Layout description.
=======================================
--- /trunk/src/runtime.cc       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/runtime.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -245,13 +245,14 @@
   int length = constant_properties->length();
   bool should_transform =
       !is_result_from_cache && boilerplate->HasFastProperties();
-  if (should_transform || has_function_literal) {
-    // Normalize the properties of object to avoid n^2 behavior
-    // when extending the object multiple properties. Indicate the number of
-    // properties to be added.
+  bool should_normalize = should_transform || has_function_literal;
+  if (should_normalize) {
+    // TODO(verwaest): We might not want to ever normalize here.
     JSObject::NormalizeProperties(
         boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
   }
+  Object::ValueType value_type = should_normalize
+      ? Object::FORCE_TAGGED : Object::OPTIMAL_REPRESENTATION;

   // TODO(verwaest): Support tracking representations in the boilerplate.
   for (int index = 0; index < length; index +=2) {
@@ -279,7 +280,7 @@
         ASSERT(!name->AsArrayIndex(&element_index));
         maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
             boilerplate, name, value, NONE,
-            Object::OPTIMAL_REPRESENTATION, mode);
+            value_type, mode);
       }
     } else if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
@@ -295,7 +296,7 @@
       Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
       maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
           boilerplate, name, value, NONE,
-          Object::OPTIMAL_REPRESENTATION, mode);
+          value_type, mode);
     }
     // If setting the property on the boilerplate throws an
     // exception, the exception is converted to an empty handle in
@@ -9636,6 +9637,7 @@
 RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
+  if (FLAG_log_timer_events) LOG(isolate, CurrentTimeEvent());

   // According to ECMA-262, section 15.9.1, page 117, the precision of
   // the number in a Date object representing a particular instant in
=======================================
--- /trunk/src/utils/random-number-generator.cc Wed Jun  4 00:06:13 2014 UTC
+++ /trunk/src/utils/random-number-generator.cc Mon Jun 16 00:04:47 2014 UTC
@@ -117,7 +117,13 @@
 int RandomNumberGenerator::Next(int bits) {
   ASSERT_LT(0, bits);
   ASSERT_GE(32, bits);
-  int64_t seed = (seed_ * kMultiplier + kAddend) & kMask;
+  // Do unsigned multiplication, which has the intended modulo semantics, while
+  // signed multiplication would expose undefined behavior.
+  uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
+  // Assigning a uint64_t to an int64_t is implementation defined, but this
+  // should be OK. Use a static_cast to explicitly state that we know what we're
+  // doing. (Famous last words...)
+  int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
   seed_ = seed;
   return static_cast<int>(seed >> (48 - bits));
 }
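
The fix above replaces a signed 64-bit multiply, whose overflow is undefined
behavior in C++, with an unsigned multiply, which wraps modulo 2^64 by
definition and is then masked back down to 48 bits. A self-contained sketch
of the same pattern, using the familiar java.util.Random-style 48-bit LCG
constants purely for illustration:

  #include <cstdint>

  int Next(int64_t* seed_state, int bits) {
    const uint64_t kMultiplier = 0x5deece66dULL;  // illustrative constants
    const uint64_t kAddend     = 0xbULL;
    const uint64_t kMask       = (1ULL << 48) - 1;
    // Unsigned multiply: well-defined wrap-around instead of signed overflow.
    uint64_t product = static_cast<uint64_t>(*seed_state) * kMultiplier;
    int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
    *seed_state = seed;
    return static_cast<int>(seed >> (48 - bits));
  }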
=======================================
--- /trunk/src/version.cc       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/version.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     27
-#define BUILD_NUMBER      28
+#define BUILD_NUMBER      29
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/src/x64/lithium-codegen-x64.cc       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/x64/lithium-codegen-x64.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -436,8 +436,17 @@


 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  return constant->Integer32Value();
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
 }
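
The point of ToRepresentation() above: when a constant feeds an operand with
31-bit Smi representation, the emitted 32-bit immediate has to be the tagged
bit pattern, not the raw integer. A sketch of the encoding it returns in the
SmiValuesAre31Bits() case (illustrative; Smi::FromInt() is the real encoder):

  #include <cstdint>

  // With 31-bit Smis the tag is 0 and the tag size is 1 bit, so the low
  // 32 bits of Smi::FromInt(value) are simply the value shifted left by one.
  // The unsigned cast avoids shifting a negative signed value.
  int32_t TaggedSmi31(int32_t value) {
    return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  }

Conceptually, addl/subl against a Smi-represented operand then uses
Immediate(TaggedSmi31(v)) instead of Immediate(v).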


@@ -1463,8 +1472,11 @@
     }
     __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
-      // Constant can't be represented as Smi due to immediate size limit.
-      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+      // Constant can't be represented as 32-bit Smi due to immediate size
+      // limit.
+      ASSERT(SmiValuesAre32Bits()
+          ? !instr->hydrogen_value()->representation().IsSmi()
+          : SmiValuesAre31Bits());
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
         DeoptimizeIf(no_condition, instr->environment());
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1499,7 +1511,9 @@
   ASSERT(left->IsRegister());

   if (right->IsConstantOperand()) {
-    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->right()->representation());
     switch (instr->op()) {
       case Token::BIT_AND:
         __ andl(ToRegister(left), Immediate(right_operand));
@@ -1631,7 +1645,20 @@
       case Token::SHL:
         if (shift_count != 0) {
           if (instr->hydrogen_value()->representation().IsSmi()) {
-            __ shlp(ToRegister(left), Immediate(shift_count));
+            if (SmiValuesAre32Bits()) {
+              __ shlp(ToRegister(left), Immediate(shift_count));
+            } else {
+              ASSERT(SmiValuesAre31Bits());
+              if (instr->can_deopt()) {
+                if (shift_count != 1) {
+                  __ shll(ToRegister(left), Immediate(shift_count - 1));
+                }
+                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
+                DeoptimizeIf(overflow, instr->environment());
+              } else {
+                __ shll(ToRegister(left), Immediate(shift_count));
+              }
+            }
           } else {
             __ shll(ToRegister(left), Immediate(shift_count));
           }
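
Background for the 31-bit Smi branch above: a constant left shift of a
Smi-represented value can push it out of the 31-bit payload range, so the
shift now carries a deopt path unless all uses truncate to Smi (see the
matching does_deopt change in lithium-x64.cc below). A rough statement of
the range condition, as a sketch only (the codegen's actual detection relies
on the overflow flag set by the final tagging shift):

  #include <cstdint>

  // 31-bit Smi payload range is [-2^30, 2^30 - 1].
  bool FitsInSmi31(int64_t v) {
    return v >= -(int64_t{1} << 30) && v <= (int64_t{1} << 30) - 1;
  }

  // Conceptually, SHL by a constant must deopt when the shifted value no
  // longer fits in the Smi payload.
  bool ShiftNeedsDeopt(int32_t payload, int shift_count) {
    return !FitsInSmi31(static_cast<int64_t>(payload) << shift_count);
  }
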
@@ -1651,8 +1678,10 @@
   ASSERT(left->Equals(instr->result()));

   if (right->IsConstantOperand()) {
-    __ subl(ToRegister(left),
-            Immediate(ToInteger32(LConstantOperand::cast(right))));
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->right()->representation());
+    __ subl(ToRegister(left), Immediate(right_operand));
   } else if (right->IsRegister()) {
     if (instr->hydrogen_value()->representation().IsSmi()) {
       __ subp(ToRegister(left), ToRegister(right));
@@ -1853,8 +1882,11 @@

   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
     if (right->IsConstantOperand()) {
-      ASSERT(!target_rep.IsSmi());  // No support for smi-immediates.
-      int32_t offset = ToInteger32(LConstantOperand::cast(right));
+      // No support for smi-immediates for 32-bit SMI.
+      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      int32_t offset =
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation());
       if (is_p) {
         __ leap(ToRegister(instr->result()),
                 MemOperand(ToRegister(left), offset));
@@ -1872,13 +1904,15 @@
     }
   } else {
     if (right->IsConstantOperand()) {
-      ASSERT(!target_rep.IsSmi());  // No support for smi-immediates.
+      // No support for smi-immediates for 32-bit SMI.
+      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      int32_t right_operand =
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation());
       if (is_p) {
-        __ addp(ToRegister(left),
-                Immediate(ToInteger32(LConstantOperand::cast(right))));
+        __ addp(ToRegister(left), Immediate(right_operand));
       } else {
-        __ addl(ToRegister(left),
-                Immediate(ToInteger32(LConstantOperand::cast(right))));
+        __ addl(ToRegister(left), Immediate(right_operand));
       }
     } else if (right->IsRegister()) {
       if (is_p) {
@@ -1912,9 +1946,12 @@
         : greater_equal;
     Register left_reg = ToRegister(left);
     if (right->IsConstantOperand()) {
-      Immediate right_imm =
-          Immediate(ToInteger32(LConstantOperand::cast(right)));
-      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+      Immediate right_imm = Immediate(
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation()));
+      ASSERT(SmiValuesAre32Bits()
+          ? !instr->hydrogen()->representation().IsSmi()
+          : SmiValuesAre31Bits());
       __ cmpl(left_reg, right_imm);
       __ j(condition, &return_left, Label::kNear);
       __ movp(left_reg, right_imm);
@@ -3042,9 +3079,22 @@
 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
+    Register key_reg = ToRegister(key);
+    Representation key_representation =
+        instr->hydrogen()->key()->representation();
+    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+      __ SmiToInteger64(key_reg, key_reg);
+    } else if (instr->hydrogen()->IsDehoisted()) {
+      // Sign extend key because it could be a 32 bit negative value
+      // and the dehoisted address computation happens in 64 bits
+      __ movsxlq(key_reg, key_reg);
+    }
+  }
   Operand operand(BuildFastArrayOperand(
       instr->elements(),
       key,
+      instr->hydrogen()->key()->representation(),
       elements_kind,
       instr->base_offset()));

@@ -3111,10 +3161,17 @@
 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
   LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
   if (instr->hydrogen()->RequiresHoleCheck()) {
     Operand hole_check_operand = BuildFastArrayOperand(
         instr->elements(),
         key,
+        instr->hydrogen()->key()->representation(),
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
@@ -3124,6 +3181,7 @@
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(),
       key,
+      instr->hydrogen()->key()->representation(),
       FAST_DOUBLE_ELEMENTS,
       instr->base_offset());
   __ movsd(result, double_load_operand);
@@ -3138,6 +3196,12 @@
   Representation representation = hinstr->representation();
   int offset = instr->base_offset();

+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
     ASSERT(!requires_hole_check);
@@ -3146,6 +3210,7 @@
       __ Load(scratch,
               BuildFastArrayOperand(instr->elements(),
                                     key,
+                                    instr->hydrogen()->key()->representation(),
                                     FAST_ELEMENTS,
                                     offset),
               Representation::Smi());
@@ -3160,6 +3225,7 @@
   __ Load(result,
           BuildFastArrayOperand(instr->elements(),
                                 key,
+                                instr->hydrogen()->key()->representation(),
                                 FAST_ELEMENTS,
                                 offset),
           representation);
@@ -3191,6 +3257,7 @@
 Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
+    Representation key_representation,
     ElementsKind elements_kind,
     uint32_t offset) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
@@ -3203,6 +3270,11 @@
     return Operand(elements_pointer_reg,
                    (constant_value << shift_size) + offset);
   } else {
+    // Take the tag bit into account while computing the shift size.
+    if (key_representation.IsSmi() && (shift_size >= 1)) {
+      ASSERT(SmiValuesAre31Bits());
+      shift_size -= kSmiTagSize;
+    }
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
     return Operand(elements_pointer_reg,
                    ToRegister(key),
@@ -4176,9 +4248,22 @@
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
+    Register key_reg = ToRegister(key);
+    Representation key_representation =
+        instr->hydrogen()->key()->representation();
+    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+      __ SmiToInteger64(key_reg, key_reg);
+    } else if (instr->hydrogen()->IsDehoisted()) {
+      // Sign extend key because it could be a 32 bit negative value
+      // and the dehoisted address computation happens in 64 bits
+      __ movsxlq(key_reg, key_reg);
+    }
+  }
   Operand operand(BuildFastArrayOperand(
       instr->elements(),
       key,
+      instr->hydrogen()->key()->representation(),
       elements_kind,
       instr->base_offset()));

@@ -4235,6 +4320,12 @@
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   XMMRegister value = ToDoubleRegister(instr->value());
   LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
   if (instr->NeedsCanonicalization()) {
     Label have_value;

@@ -4251,6 +4342,7 @@
   Operand double_store_operand = BuildFastArrayOperand(
       instr->elements(),
       key,
+      instr->hydrogen()->key()->representation(),
       FAST_DOUBLE_ELEMENTS,
       instr->base_offset());

@@ -4264,6 +4356,12 @@
   int offset = instr->base_offset();
   Representation representation = hinstr->value()->representation();

+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
     ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
     ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
@@ -4272,6 +4370,7 @@
       __ Load(scratch,
               BuildFastArrayOperand(instr->elements(),
                                     key,
+                                    instr->hydrogen()->key()->representation(),
                                     FAST_ELEMENTS,
                                     offset),
               Representation::Smi());
@@ -4286,6 +4385,7 @@
   Operand operand =
       BuildFastArrayOperand(instr->elements(),
                             key,
+                            instr->hydrogen()->key()->representation(),
                             FAST_ELEMENTS,
                             offset);
   if (instr->value()->IsRegister()) {
@@ -4706,8 +4806,8 @@
   Register output = ToRegister(instr->result());
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
-    __ testl(input, input);
-    DeoptimizeIf(sign, instr->environment());
+    Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
+    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
   }
   __ Integer32ToSmi(output, input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
=======================================
--- /trunk/src/x64/lithium-codegen-x64.h        Wed Jun 11 00:05:05 2014 UTC
+++ /trunk/src/x64/lithium-codegen-x64.h        Mon Jun 16 00:04:47 2014 UTC
@@ -65,6 +65,7 @@
   bool IsInteger32Constant(LConstantOperand* op) const;
   bool IsDehoistedKeyConstant(LConstantOperand* op) const;
   bool IsSmiConstant(LConstantOperand* op) const;
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
   int32_t ToInteger32(LConstantOperand* op) const;
   Smi* ToSmi(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
@@ -231,6 +232,7 @@
   Operand BuildFastArrayOperand(
       LOperand* elements_pointer,
       LOperand* key,
+      Representation key_representation,
       ElementsKind elements_kind,
       uint32_t base_offset);

=======================================
--- /trunk/src/x64/lithium-x64.cc       Fri Jun 13 00:05:05 2014 UTC
+++ /trunk/src/x64/lithium-x64.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -699,17 +699,23 @@
     HValue* right_value = instr->right();
     LOperand* right = NULL;
     int constant_value = 0;
+    bool does_deopt = false;
     if (right_value->IsConstant()) {
       HConstant* constant = HConstant::cast(right_value);
       right = chunk_->DefineConstantOperand(constant);
       constant_value = constant->Integer32Value() & 0x1f;
+      if (SmiValuesAre31Bits() && instr->representation().IsSmi() &&
+          constant_value > 0) {
+        // Left shift can deoptimize if we shift by > 0 and the result
+        // cannot be truncated to smi.
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
     } else {
       right = UseFixed(right_value, rcx);
     }

     // Shift operations can only deoptimize if we do a logical shift by 0 and
     // the result cannot be truncated to int32.
-    bool does_deopt = false;
     if (op == Token::SHR && constant_value == 0) {
       if (FLAG_opt_safe_uint32_operations) {
         does_deopt = !instr->CheckFlag(HInstruction::kUint32);
@@ -1521,7 +1527,7 @@
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     HValue* right_candidate = instr->BetterRightOperand();
     LOperand* right;
-    if (instr->representation().IsSmi()) {
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
       // We cannot add a tagged immediate to a tagged value,
       // so we request it in a register.
       right = UseRegisterAtStart(right_candidate);
@@ -2126,11 +2132,24 @@


 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsInteger32());
+  ASSERT((kPointerSize == kInt64Size &&
+          instr->key()->representation().IsInteger32()) ||
+         (kPointerSize == kInt32Size &&
+          instr->key()->representation().IsSmiOrInteger32()));
   ElementsKind elements_kind = instr->elements_kind();
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* key = NULL;
   LInstruction* result = NULL;

+  if (kPointerSize == kInt64Size) {
+    key = UseRegisterOrConstantAtStart(instr->key());
+  } else {
+    bool clobbers_key = ExternalArrayOpRequiresTemp(
+        instr->key()->representation(), elements_kind);
+    key = clobbers_key
+        ? UseTempRegister(instr->key())
+        : UseRegisterOrConstantAtStart(instr->key());
+  }
+
   if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
     FindDehoistedKeyDefinitions(instr->key());
   }
@@ -2224,7 +2243,16 @@
       elements_kind == FLOAT32_ELEMENTS;
   LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* key = NULL;
+  if (kPointerSize == kInt64Size) {
+    key = UseRegisterOrConstantAtStart(instr->key());
+  } else {
+    bool clobbers_key = ExternalArrayOpRequiresTemp(
+        instr->key()->representation(), elements_kind);
+    key = clobbers_key
+        ? UseTempRegister(instr->key())
+        : UseRegisterOrConstantAtStart(instr->key());
+  }
   LOperand* backing_store = UseRegister(instr->elements());
   return new(zone()) LStoreKeyed(backing_store, key, val);
 }
=======================================
--- /trunk/src/x64/lithium-x64.h        Wed Jun 11 00:05:05 2014 UTC
+++ /trunk/src/x64/lithium-x64.h        Mon Jun 16 00:04:47 2014 UTC
@@ -1606,6 +1606,22 @@

   Heap::RootListIndex index() const { return hydrogen()->index(); }
 };
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  // Operations that require the key to be divided by two to be converted into
+  // an index cannot fold the scale operation into a load and need an extra
+  // temp register to do the work.
+  return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+      (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+       elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+       elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+       elements_kind == UINT8_ELEMENTS ||
+       elements_kind == INT8_ELEMENTS ||
+       elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
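
A note on ExternalArrayOpRequiresTemp(): with 31-bit Smis a Smi key is the
index already shifted left by one, so an element access wants an effective
address scale of (element shift - 1). That is a legal x64 scale factor for
2/4/8-byte elements, which is what the kSmiTagSize adjustment in
BuildFastArrayOperand handles, but for the 1-byte element kinds listed above
the required scale would be one half, so the key must first be untagged into
a temp register. A tiny sketch of the condition (names are illustrative):

  // element_shift is log2 of the element size in bytes.
  bool NeedsTempForSmiKey(int element_shift) {
    const int kSmiTagSize = 1;  // 31-bit Smi: payload shifted left by one
    return element_shift - kSmiTagSize < 0;  // cannot express a scale of 1/2
  }
  // NeedsTempForSmiKey(0) is true for the int8/uint8 kinds above;
  // NeedsTempForSmiKey(2) is false for 4-byte elements.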


 class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
=======================================
--- /trunk/src/x87/lithium-codegen-x87.cc       Thu Jun 12 00:04:49 2014 UTC
+++ /trunk/src/x87/lithium-codegen-x87.cc       Mon Jun 16 00:04:47 2014 UTC
@@ -1546,14 +1546,17 @@
     DeoptimizeIf(zero, instr->environment());
   }

-  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-    __ sar(dividend, shift);
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(overflow, instr->environment());
+    }
     return;
   }

-  // Dividing by -1 is basically negation, unless we overflow.
-  if (divisor == -1) {
-    DeoptimizeIf(overflow, instr->environment());
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sar(dividend, shift);
     return;
   }
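
Two facts drive the code above. First, for integer x and k >= 0, the
arithmetic right shift x >> k is exactly floor(x / 2^k), even for negative x
(unlike C++'s '/', which truncates toward zero), which is why a plain sar is
a valid fast path. Second, negating kMinInt overflows, so the divisor == -1
case has to be handled (and, if the dividend can be kMinInt, deoptimized on
overflow) before taking the shift-only path. A small sketch of the first
fact (assumes arithmetic right shift for signed values, as on V8's targets):

  #include <cstdint>

  int32_t FlooringDivByPowerOf2(int32_t x, int shift) {
    return x >> shift;  // arithmetic shift == floor division by 2^shift
  }
  // FlooringDivByPowerOf2(-3, 1) == -2, whereas -3 / 2 == -1 in C++.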

=======================================
--- /trunk/test/mjsunit/fast-non-keyed.js       Wed Jun 13 11:51:58 2012 UTC
+++ /trunk/test/mjsunit/fast-non-keyed.js       Mon Jun 16 00:04:47 2014 UTC
@@ -108,6 +108,6 @@
 AddProps3(obj3);
 assertTrue(%HasFastProperties(obj3));

-var bad_name = {};
-bad_name[".foo"] = 0;
-assertFalse(%HasFastProperties(bad_name));
+var funny_name = {};
+funny_name[".foo"] = 0;
+assertTrue(%HasFastProperties(funny_name));
=======================================
--- /trunk/test/mjsunit/mjsunit.status  Thu Jun 12 00:04:49 2014 UTC
+++ /trunk/test/mjsunit/mjsunit.status  Mon Jun 16 00:04:47 2014 UTC
@@ -48,6 +48,9 @@
   # This test non-deterministically runs out of memory on Windows ia32.
   'regress/regress-crbug-160010': [SKIP],

+  # Issue 3389: deopt_every_n_garbage_collections is unsafe
+  'regress/regress-2653': [SKIP],
+
##############################################################################
   # Too slow in debug mode with --stress-opt mode.
   'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
=======================================
--- /trunk/tools/fuzz-harness.sh        Fri Oct  5 08:50:56 2012 UTC
+++ /trunk/tools/fuzz-harness.sh        Mon Jun 16 00:04:47 2014 UTC
@@ -85,7 +85,7 @@
     "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
 exit_code=$(cat w* | grep " looking good" -c)
 exit_code=$((100-exit_code))
-tar -cjf fuzz-results-$(date +%y%m%d).tar.bz2 err-* w*
+tar -cjf fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2 err-* w*
 rm -f err-* w*

 echo "Total failures: $exit_code"
=======================================
--- /trunk/tools/profviz/composer.js    Wed Jun 11 00:05:05 2014 UTC
+++ /trunk/tools/profviz/composer.js    Mon Jun 16 00:04:47 2014 UTC
@@ -43,6 +43,7 @@

   var kY1Offset = 11;               // Offset for stack frame vs. event lines.
   var kDeoptRow = 7;                // Row displaying deopts.
+  var kGetTimeHeight = 0.5;         // Height of marker displaying timed part.
   var kMaxDeoptLength = 4;          // Draw size of the largest deopt.
   var kPauseLabelPadding = 5;       // Padding for pause time labels.
   var kNumPauseLabels = 7;          // Number of biggest pauses to label.
@@ -136,6 +137,7 @@
   var code_map = new CodeMap();
   var execution_pauses = [];
   var deopts = [];
+  var gettime = [];
   var event_stack = [];
   var last_time_stamp = [];
   for (var i = 0; i < kNumThreads; i++) {
@@ -273,6 +275,10 @@
     var processCodeDeoptEvent = function(time, size) {
       deopts.push(new Deopt(time, size));
     }
+
+    var processCurrentTimeEvent = function(time) {
+      gettime.push(time);
+    }

     var processSharedLibrary = function(name, start, end) {
       var code_entry = new CodeMap.CodeEntry(end - start, name);
@@ -316,6 +322,8 @@
                             processor: processCodeDeleteEvent },
         'code-deopt':     { parsers: [parseTimeStamp, parseInt],
                             processor: processCodeDeoptEvent },
+        'current-time':   { parsers: [parseTimeStamp],
+                            processor: processCurrentTimeEvent },
         'tick':           { parsers: [parseInt, parseTimeStamp,
                                       null, null, parseInt, 'var-args'],
                             processor: processTickEvent }
@@ -391,12 +399,15 @@
     output("set xtics out nomirror");
     output("unset key");

-    function DrawBarBase(color, start, end, top, bottom) {
+    function DrawBarBase(color, start, end, top, bottom, transparency) {
       obj_index++;
       command = "set object " + obj_index + " rect";
       command += " from " + start + ", " + top;
       command += " to " + end + ", " + bottom;
       command += " fc rgb \"" + color + "\"";
+      if (transparency) {
+        command += " fs transparent solid " + transparency;
+      }
       output(command);
     }

@@ -429,6 +440,13 @@
                   deopt.time + 10 * pause_tolerance,
                   deopt.size / max_deopt_size * kMaxDeoptLength);
     }
+
+    // Plot current time polls.
+    if (gettime.length > 1) {
+      var start = gettime[0];
+      var end = gettime.pop();
+      DrawBarBase("#0000BB", start, end, kGetTimeHeight, 0, 0.2);
+    }

     // Name Y-axis.
     var ytics = [];
