Revision: 24149
Author:   [email protected]
Date:     Tue Sep 23 12:44:49 2014 UTC
Log:      Version 3.29.84 (based on bleeding_edge revision r24096)

Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=24149

Deleted:
 /trunk/test/mjsunit/regress/regress-json-parse-index.js
Modified:
 /trunk/ChangeLog
 /trunk/PRESUBMIT.py
 /trunk/src/arm/code-stubs-arm.cc
 /trunk/src/arm/lithium-codegen-arm.cc
 /trunk/src/arm/lithium-codegen-arm.h
 /trunk/src/arm/macro-assembler-arm.cc
 /trunk/src/arm/macro-assembler-arm.h
 /trunk/src/arm64/code-stubs-arm64.cc
 /trunk/src/arm64/lithium-codegen-arm64.cc
 /trunk/src/arm64/lithium-codegen-arm64.h
 /trunk/src/arm64/macro-assembler-arm64.cc
 /trunk/src/arm64/macro-assembler-arm64.h
 /trunk/src/arm64/simulator-arm64.cc
 /trunk/src/base/macros.h
 /trunk/src/builtins.cc
 /trunk/src/builtins.h
 /trunk/src/code-stubs-hydrogen.cc
 /trunk/src/code-stubs.cc
 /trunk/src/code-stubs.h
 /trunk/src/compiler/arm/code-generator-arm.cc
 /trunk/src/compiler/arm/instruction-selector-arm.cc
 /trunk/src/compiler/arm64/code-generator-arm64.cc
 /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc
 /trunk/src/compiler/arm64/instruction-selector-arm64.cc
 /trunk/src/compiler/code-generator.cc
 /trunk/src/compiler/code-generator.h
 /trunk/src/compiler/common-operator-unittest.cc
 /trunk/src/compiler/common-operator.cc
 /trunk/src/compiler/common-operator.h
 /trunk/src/compiler/graph-unittest.cc
 /trunk/src/compiler/graph-unittest.h
 /trunk/src/compiler/ia32/code-generator-ia32.cc
 /trunk/src/compiler/ia32/instruction-selector-ia32.cc
 /trunk/src/compiler/instruction-codes.h
 /trunk/src/compiler/instruction-selector-unittest.h
 /trunk/src/compiler/js-builtin-reducer.cc
 /trunk/src/compiler/js-builtin-reducer.h
 /trunk/src/compiler/js-graph.h
 /trunk/src/compiler/js-typed-lowering.cc
 /trunk/src/compiler/machine-operator-reducer-unittest.cc
 /trunk/src/compiler/machine-operator-reducer.cc
 /trunk/src/compiler/machine-operator-reducer.h
 /trunk/src/compiler/machine-operator-unittest.cc
 /trunk/src/compiler/machine-operator.cc
 /trunk/src/compiler/machine-operator.h
 /trunk/src/compiler/node-matchers.h
 /trunk/src/compiler/opcodes.h
 /trunk/src/compiler/pipeline.cc
 /trunk/src/compiler/raw-machine-assembler.h
 /trunk/src/compiler/typer.cc
 /trunk/src/compiler/x64/code-generator-x64.cc
 /trunk/src/compiler/x64/instruction-selector-x64.cc
 /trunk/src/compiler.cc
 /trunk/src/conversions-inl.h
 /trunk/src/conversions.h
 /trunk/src/counters.h
 /trunk/src/deoptimizer.h
 /trunk/src/elements-kind.h
 /trunk/src/heap/gc-idle-time-handler.h
 /trunk/src/heap-snapshot-generator-inl.h
 /trunk/src/heap-snapshot-generator.cc
 /trunk/src/heap-snapshot-generator.h
 /trunk/src/hydrogen-instructions.h
 /trunk/src/hydrogen.cc
 /trunk/src/hydrogen.h
 /trunk/src/ia32/code-stubs-ia32.cc
 /trunk/src/ia32/lithium-codegen-ia32.cc
 /trunk/src/ia32/lithium-codegen-ia32.h
 /trunk/src/ia32/macro-assembler-ia32.cc
 /trunk/src/ia32/macro-assembler-ia32.h
 /trunk/src/ic/arm/ic-arm.cc
 /trunk/src/ic/arm/ic-compiler-arm.cc
 /trunk/src/ic/arm64/ic-arm64.cc
 /trunk/src/ic/arm64/ic-compiler-arm64.cc
 /trunk/src/ic/handler-compiler.cc
 /trunk/src/ic/ia32/ic-compiler-ia32.cc
 /trunk/src/ic/ia32/ic-ia32.cc
 /trunk/src/ic/ic-compiler.cc
 /trunk/src/ic/ic.cc
 /trunk/src/ic/ic.h
 /trunk/src/ic/mips/ic-compiler-mips.cc
 /trunk/src/ic/mips/ic-mips.cc
 /trunk/src/ic/mips64/ic-compiler-mips64.cc
 /trunk/src/ic/mips64/ic-mips64.cc
 /trunk/src/ic/x64/ic-compiler-x64.cc
 /trunk/src/ic/x64/ic-x64.cc
 /trunk/src/ic/x87/handler-compiler-x87.cc
 /trunk/src/ic/x87/ic-compiler-x87.cc
 /trunk/src/ic/x87/ic-x87.cc
 /trunk/src/json-parser.h
 /trunk/src/lithium-codegen.cc
 /trunk/src/lithium-codegen.h
 /trunk/src/mips/code-stubs-mips.cc
 /trunk/src/mips/full-codegen-mips.cc
 /trunk/src/mips/lithium-codegen-mips.cc
 /trunk/src/mips/lithium-codegen-mips.h
 /trunk/src/mips/macro-assembler-mips.cc
 /trunk/src/mips/macro-assembler-mips.h
 /trunk/src/mips64/code-stubs-mips64.cc
 /trunk/src/mips64/full-codegen-mips64.cc
 /trunk/src/mips64/lithium-codegen-mips64.cc
 /trunk/src/mips64/lithium-codegen-mips64.h
 /trunk/src/mips64/macro-assembler-mips64.cc
 /trunk/src/mips64/macro-assembler-mips64.h
 /trunk/src/objects.h
 /trunk/src/runtime.cc
 /trunk/src/utils.h
 /trunk/src/version.cc
 /trunk/src/x64/code-stubs-x64.cc
 /trunk/src/x64/lithium-codegen-x64.cc
 /trunk/src/x64/lithium-codegen-x64.h
 /trunk/src/x64/macro-assembler-x64.cc
 /trunk/src/x64/macro-assembler-x64.h
 /trunk/src/x87/assembler-x87-inl.h
 /trunk/src/x87/assembler-x87.cc
 /trunk/src/x87/assembler-x87.h
 /trunk/src/x87/builtins-x87.cc
 /trunk/src/x87/code-stubs-x87.cc
 /trunk/src/x87/code-stubs-x87.h
 /trunk/src/x87/codegen-x87.cc
 /trunk/src/x87/deoptimizer-x87.cc
 /trunk/src/x87/disasm-x87.cc
 /trunk/src/x87/full-codegen-x87.cc
 /trunk/src/x87/lithium-codegen-x87.cc
 /trunk/src/x87/lithium-codegen-x87.h
 /trunk/src/x87/lithium-gap-resolver-x87.cc
 /trunk/src/x87/lithium-x87.cc
 /trunk/src/x87/lithium-x87.h
 /trunk/src/x87/macro-assembler-x87.cc
 /trunk/src/x87/macro-assembler-x87.h
 /trunk/test/cctest/cctest.status
 /trunk/test/cctest/compiler/test-js-typed-lowering.cc
 /trunk/test/cctest/compiler/test-run-machops.cc
 /trunk/test/cctest/test-debug.cc
 /trunk/test/cctest/test-disasm-x87.cc
 /trunk/test/mjsunit/keyed-named-access.js
 /trunk/test/mjsunit/regress/string-set-char-deopt.js

=======================================
--- /trunk/test/mjsunit/regress/regress-json-parse-index.js Tue Sep 23 08:38:19 2014 UTC
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-var o = JSON.parse('{"\\u0030":100}');
-assertEquals(100, o[0]);
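
Context for the deleted test: "\u0030" is the JSON escape for the character
"0", so the parsed key is the array-index-like property name "0" and numeric
and string access must agree. A minimal sketch of the behavior under test
(illustration only, not part of the patch):

  // The literal passed to JSON.parse contains the key "\u0030", which
  // decodes to the string "0".
  var o = JSON.parse('{"\\u0030":100}');
  console.log(o[0]);    // 100 -- o[0] coerces to the property key "0"
  console.log(o["0"]);  // 100 -- same property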
=======================================
--- /trunk/ChangeLog    Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/ChangeLog    Tue Sep 23 12:44:49 2014 UTC
@@ -1,3 +1,8 @@
+2014-09-23: Version 3.29.84
+
+        Performance and stability improvements on all platforms.
+
+
 2014-09-23: Version 3.29.83

         Performance and stability improvements on all platforms.
=======================================
--- /trunk/PRESUBMIT.py Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/PRESUBMIT.py Tue Sep 23 12:44:49 2014 UTC
@@ -34,32 +34,6 @@
 import sys


-_EXCLUDED_PATHS = (
-    r"^test[\\\/].*",
-    r"^testing[\\\/].*",
-    r"^third_party[\\\/].*",
-    r"^tools[\\\/].*",
-)
-
-
-# Regular expression that matches code only used for test binaries
-# (best effort).
-_TEST_CODE_EXCLUDED_PATHS = (
-    r'.+-unittest\.cc',
-    # Has a method VisitForTest().
-    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
-    # Test extension.
-    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
-)
-
-
-_TEST_ONLY_WARNING = (
-    'You might be calling functions intended only for testing from\n'
-    'production code.  It is OK to ignore this warning if you know what\n'
-    'you are doing, as the heuristics used to detect the situation are\n'
-    'not perfect.  The commit queue will not block on this warning.')
-
-
 def _V8PresubmitChecks(input_api, output_api):
   """Runs the V8 presubmit checks."""
   import sys
@@ -139,49 +113,6 @@
   return results


-def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
-  """Attempts to prevent use of functions intended only for testing in
-  non-testing code. For now this is just a best-effort implementation
-  that ignores header files and may have some false positives. A
-  better implementation would probably need a proper C++ parser.
-  """
-  # We only scan .cc files, as the declaration of for-testing functions in
-  # header files are hard to distinguish from calls to such functions without a
-  # proper C++ parser.
-  file_inclusion_pattern = r'.+\.cc'
-
-  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
-  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
-  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
-  exclusion_pattern = input_api.re.compile(
-    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
-      base_function_pattern, base_function_pattern))
-
-  def FilterFile(affected_file):
-    black_list = (_EXCLUDED_PATHS +
-                  _TEST_CODE_EXCLUDED_PATHS +
-                  input_api.DEFAULT_BLACK_LIST)
-    return input_api.FilterSourceFile(
-      affected_file,
-      white_list=(file_inclusion_pattern, ),
-      black_list=black_list)
-
-  problems = []
-  for f in input_api.AffectedSourceFiles(FilterFile):
-    local_path = f.LocalPath()
-    for line_number, line in f.ChangedContents():
-      if (inclusion_pattern.search(line) and
-          not comment_pattern.search(line) and
-          not exclusion_pattern.search(line)):
-        problems.append(
-          '%s:%d\n    %s' % (local_path, line_number, line.strip()))
-
-  if problems:
-    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
-  else:
-    return []
-
-
 def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   results = []
@@ -191,8 +122,6 @@
       input_api, output_api))
   results.extend(_V8PresubmitChecks(input_api, output_api))
   results.extend(_CheckUnwantedDependencies(input_api, output_api))
-  results.extend(
-      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
   return results


=======================================
--- /trunk/src/arm/code-stubs-arm.cc    Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm/code-stubs-arm.cc    Tue Sep 23 12:44:49 2014 UTC
@@ -3465,8 +3465,8 @@
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

-  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
-  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+  __ JumpIfNotUniqueName(tmp1, &miss);
+  __ JumpIfNotUniqueName(tmp2, &miss);

   // Unique names are compared by identity.
   __ cmp(left, right);
@@ -3698,7 +3698,7 @@
     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ JumpIfNotUniqueName(entity_name, miss);
     __ bind(&good);

     // Restore the properties.
@@ -3868,7 +3868,7 @@
       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ ldrb(entry_key,
               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }

=======================================
--- /trunk/src/arm/lithium-codegen-arm.cc       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/arm/lithium-codegen-arm.cc       Tue Sep 23 12:44:49 2014 UTC
@@ -319,26 +319,30 @@
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-                jump_table_.length() * 7)) {
+      deopt_jump_table_.length() * 7)) {
     Abort(kGeneratedCodeIsTooLarge);
   }

-  if (jump_table_.length() > 0) {
+  if (deopt_jump_table_.length() > 0) {
     Label needs_frame, call_deopt_entry;

     Comment(";;; -------------------- Jump table --------------------");
-    Address base = jump_table_[0].address;
+    Address base = deopt_jump_table_[0].address;

     Register entry_offset = scratch0();

-    int length = jump_table_.length();
+    int length = deopt_jump_table_.length();
     for (int i = 0; i < length; i++) {
-      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
       __ bind(&table_entry->label);

-      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Deoptimizer::BailoutType type = table_entry->bailout_type;
+      DCHECK(type == deopt_jump_table_[0].bailout_type);
       Address entry = table_entry->address;
-      DeoptComment(table_entry->reason);
+      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
+      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+      DeoptComment(table_entry->mnemonic, table_entry->reason);

       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load an immediate
@@ -842,7 +846,7 @@


 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* detail,
+                            const char* reason,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -895,35 +899,35 @@
     __ stop("trap_on_deopt", condition);
   }

-  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(reason);
+    DeoptComment(instr->Mnemonic(), reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
-                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (jump_table_.is_empty() ||
-        !table_entry.IsEquivalentTo(jump_table_.last())) {
-      jump_table_.Add(table_entry, zone());
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().bailout_type != bailout_type) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
+                                              bailout_type, !frame_is_built_);
+      deopt_jump_table_.Add(table_entry, zone());
     }
-    __ b(condition, &jump_table_.last().label);
+    __ b(condition, &deopt_jump_table_.last().label);
   }
 }


 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* detail) {
+                            const char* reason) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, detail, bailout_type);
+  DeoptimizeIf(condition, instr, reason, bailout_type);
 }


@@ -4972,22 +4976,26 @@
     __ bind(&check_false);
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(scratch2, Operand(ip));
-    DeoptimizeIf(ne, instr, "cannot truncate");
+    __ RecordComment("Deferred TaggedToI: cannot truncate");
+    DeoptimizeIf(ne, instr);
     __ mov(input_reg, Operand::Zero());
   } else {
-    DeoptimizeIf(ne, instr, "not a heap number");
+    __ RecordComment("Deferred TaggedToI: not a heap number");
+    DeoptimizeIf(ne, instr);

     __ sub(ip, scratch2, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
-    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
+    DeoptimizeIf(ne, instr);

     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmp(input_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_scratch2);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr, "minus zero");
+      __ RecordComment("Deferred TaggedToI: minus zero");
+      DeoptimizeIf(ne, instr);
     }
   }
   __ bind(&done);
=======================================
--- /trunk/src/arm/lithium-codegen-arm.h        Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/arm/lithium-codegen-arm.h        Tue Sep 23 12:44:49 2014 UTC
@@ -26,7 +26,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        jump_table_(4, info->zone()),
+        deopt_jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -235,9 +235,9 @@
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* detail, Deoptimizer::BailoutType bailout_type);
+                    const char* reason, Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* detail = NULL);
+                    const char* reason = NULL);

   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -332,7 +332,7 @@
   void EmitVectorLoadICRegisters(T* instr);

   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
=======================================
--- /trunk/src/arm/macro-assembler-arm.cc       Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.cc       Tue Sep 23 12:44:49 2014 UTC
@@ -3199,8 +3199,8 @@
 }


-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
-                                                     Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+                                         Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
=======================================
--- /trunk/src/arm/macro-assembler-arm.h        Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.h        Tue Sep 23 12:44:49 2014 UTC
@@ -1340,7 +1340,7 @@
   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                 Label* failure);

-  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);

   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
=======================================
--- /trunk/src/arm64/code-stubs-arm64.cc        Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm64/code-stubs-arm64.cc        Tue Sep 23 12:44:49 2014 UTC
@@ -3370,8 +3370,8 @@

   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   // should have kInternalizedTag set.
-  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
-  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
+  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+  __ JumpIfNotUniqueName(rhs_instance_type, &miss);

   // Unique names are compared by identity.
   STATIC_ASSERT(EQUAL == 0);
@@ -4488,7 +4488,7 @@
     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ Ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ JumpIfNotUniqueName(entity_name, miss);
     __ Bind(&good);
   }

@@ -4575,7 +4575,7 @@
       // Check if the entry name is not a unique name.
       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }

=======================================
--- /trunk/src/arm64/lithium-codegen-arm64.cc   Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/arm64/lithium-codegen-arm64.cc   Tue Sep 23 12:44:49 2014 UTC
@@ -839,8 +839,12 @@
       Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
       __ Bind(&table_entry->label);

+      Deoptimizer::BailoutType type = table_entry->bailout_type;
       Address entry = table_entry->address;
-      DeoptComment(table_entry->reason);
+      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
+      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+      DeoptComment(table_entry->mnemonic, table_entry->reason);

// Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load the base
@@ -989,7 +993,7 @@


 void LCodeGen::DeoptimizeBranch(
-    LInstruction* instr, const char* detail, BranchType branch_type,
+    LInstruction* instr, const char* reason, BranchType branch_type,
     Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1040,22 +1044,21 @@
     __ Bind(&dont_trap);
   }

-  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
   if (branch_type == always &&
       frame_is_built_ && !info()->saves_caller_doubles()) {
-    DeoptComment(reason);
+    DeoptComment(instr->Mnemonic(), reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    Deoptimizer::JumpTableEntry* table_entry =
-        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
-                                                 !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (jump_table_.is_empty() ||
-        !table_entry->IsEquivalentTo(*jump_table_.last())) {
+    if (jump_table_.is_empty() || (jump_table_.last()->address != entry) ||
+        (jump_table_.last()->bailout_type != bailout_type) ||
+        (jump_table_.last()->needs_frame != !frame_is_built_)) {
+      Deoptimizer::JumpTableEntry* table_entry =
+          new (zone()) Deoptimizer::JumpTableEntry(
+              entry, instr->Mnemonic(), reason, bailout_type, !frame_is_built_);
       jump_table_.Add(table_entry, zone());
     }
     __ B(&jump_table_.last()->label, branch_type, reg, bit);
@@ -1065,78 +1068,78 @@

 void LCodeGen::Deoptimize(LInstruction* instr,
                           Deoptimizer::BailoutType* override_bailout_type,
-                          const char* detail) {
-  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
+                          const char* reason) {
+  DeoptimizeBranch(instr, reason, always, NoReg, -1, override_bailout_type);
 }


 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
-                            const char* detail) {
-  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
+                            const char* reason) {
+  DeoptimizeBranch(instr, reason, static_cast<BranchType>(cond));
 }


 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
-                                const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_zero, rt);
+                                const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_zero, rt);
 }


 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                                   const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
+                                   const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_not_zero, rt);
 }


 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                                    const char* detail) {
+                                    const char* reason) {
   int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
-  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
+  DeoptimizeIfBitSet(rt, sign_bit, instr, reason);
 }


 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                               const char* detail) {
-  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
+                               const char* reason) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, reason);
 }


 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
+                                  const char* reason) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, reason);
 }


 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                                LInstruction* instr, const char* detail) {
+                                LInstruction* instr, const char* reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(eq, instr, detail);
+  DeoptimizeIf(eq, instr, reason);
 }


 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                                   LInstruction* instr, const char* detail) {
+                                   LInstruction* instr, const char* reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(ne, instr, detail);
+  DeoptimizeIf(ne, instr, reason);
 }


 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                                     const char* detail) {
+                                     const char* reason) {
   __ TestForMinusZero(input);
-  DeoptimizeIf(vs, instr, detail);
+  DeoptimizeIf(vs, instr, reason);
 }


 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+                                  const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_bit_set, rt, bit);
 }


 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                                    const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
+                                    const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_bit_clear, rt, bit);
 }


@@ -5626,20 +5629,22 @@
     Register output = ToRegister32(instr->result());
     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

-    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr,
-                        "not a heap number");
+    __ RecordComment("Deferred TaggedToI: not a heap number");
+    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr);

     // A heap number: load value and convert to int32 using non-truncating
     // function. If the result is out of range, branch to deoptimize.
     __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
     __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
-    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
+    DeoptimizeIf(ne, instr);

     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Cmp(output, 0);
       __ B(ne, &done);
       __ Fmov(scratch1, dbl_scratch1);
-      DeoptimizeIfNegative(scratch1, instr, "minus zero");
+      __ RecordComment("Deferred TaggedToI: minus zero");
+      DeoptimizeIfNegative(scratch1, instr);
     }
   }
   __ Bind(&done);
=======================================
--- /trunk/src/arm64/lithium-codegen-arm64.h    Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/arm64/lithium-codegen-arm64.h    Tue Sep 23 12:44:49 2014 UTC
@@ -213,35 +213,35 @@
                                    Register temp,
                                    LOperand* index,
                                    String::Encoding encoding);
-  void DeoptimizeBranch(LInstruction* instr, const char* detail,
+  void DeoptimizeBranch(LInstruction* instr, const char* reason,
                         BranchType branch_type, Register reg = NoReg,
                         int bit = -1,
                         Deoptimizer::BailoutType* override_bailout_type = NULL);
   void Deoptimize(LInstruction* instr,
                   Deoptimizer::BailoutType* override_bailout_type = NULL,
-                  const char* detail = NULL);
+                  const char* reason = NULL);
   void DeoptimizeIf(Condition cond, LInstruction* instr,
-                    const char* detail = NULL);
+                    const char* reason = NULL);
   void DeoptimizeIfZero(Register rt, LInstruction* instr,
-                        const char* detail = NULL);
+                        const char* reason = NULL);
   void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                           const char* detail = NULL);
+                           const char* reason = NULL);
   void DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                            const char* detail = NULL);
+                            const char* reason = NULL);
   void DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                       const char* detail = NULL);
+                       const char* reason = NULL);
   void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                          const char* detail = NULL);
+                          const char* reason = NULL);
   void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                        LInstruction* instr, const char* detail = NULL);
+                        LInstruction* instr, const char* reason = NULL);
   void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                           LInstruction* instr, const char* detail = NULL);
+                           LInstruction* instr, const char* reason = NULL);
   void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                             const char* detail = NULL);
+                             const char* reason = NULL);
   void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                          const char* detail = NULL);
+                          const char* reason = NULL);
   void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                            const char* detail = NULL);
+                            const char* reason = NULL);

   MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                               Register base,
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.cc   Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.cc   Tue Sep 23 12:44:49 2014 UTC
@@ -2768,8 +2768,8 @@
 }


-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
-                                                     Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+                                         Label* not_unique_name) {
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
   //   continue
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.h    Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.h    Tue Sep 23 12:44:49 2014 UTC
@@ -1074,7 +1074,7 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);

-  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
+  void JumpIfNotUniqueName(Register type, Label* not_unique_name);

   // ---- Calling / Jumping helpers ----

=======================================
--- /trunk/src/arm64/simulator-arm64.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/arm64/simulator-arm64.cc Tue Sep 23 12:44:49 2014 UTC
@@ -1855,12 +1855,9 @@
 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
   if ((address >= stack_limit_) && (address < stack)) {
     fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
-    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(stack));
-    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(address));
-    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(stack_limit_));
+    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n", stack);
+    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n", address);
+    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n", stack_limit_);
     fprintf(stream_, "\n");
     FATAL("ACCESS BELOW STACK POINTER");
   }
=======================================
--- /trunk/src/base/macros.h    Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/base/macros.h    Tue Sep 23 12:44:49 2014 UTC
@@ -230,7 +230,7 @@
 // WARNING: if Dest or Source is a non-POD type, the result of the memcpy
 // is likely to surprise you.
 template <class Dest, class Source>
-V8_INLINE Dest bit_cast(Source const& source) {
+inline Dest bit_cast(const Source& source) {
   COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);

   Dest dest;
=======================================
--- /trunk/src/builtins.cc      Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/builtins.cc      Tue Sep 23 12:44:49 2014 UTC
@@ -1287,6 +1287,11 @@
 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }
+
+
+static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateSloppyArguments(masm);
+}


 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
=======================================
--- /trunk/src/builtins.h       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/builtins.h       Tue Sep 23 12:44:49 2014 UTC
@@ -89,6 +89,7 @@
                                     kNoExtraICState)                          \
   V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState)             \
   V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)          \
+  V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, kNoExtraICState) \
                                                                               \
   V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
                                                                               \
=======================================
--- /trunk/src/code-stubs-hydrogen.cc   Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/code-stubs-hydrogen.cc   Tue Sep 23 12:44:49 2014 UTC
@@ -71,8 +71,6 @@
     MULTIPLE
   };

-  HValue* UnmappedCase(HValue* elements, HValue* key);
-
   HValue* BuildArrayConstructor(ElementsKind kind,
                                 AllocationSiteOverrideMode override_mode,
                                 ArgumentClass argument_class);
@@ -600,122 +598,6 @@


 Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
-  HValue* result;
-  HInstruction* backing_store = Add<HLoadKeyed>(
-      elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
-      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-  Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
-  HValue* backing_store_length =
-      Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
-  IfBuilder in_unmapped_range(this);
-  in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
-                                                 Token::LT);
-  in_unmapped_range.Then();
-  {
-    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
-                             FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
-  }
-  in_unmapped_range.ElseDeopt("Outside of range");
-  in_unmapped_range.End();
-  return result;
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
-  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
-
-  // Mapped arguments are actual arguments. Unmapped arguments are values added
-  // to the arguments object after it was created for the call. Mapped arguments
-  // are stored in the context at indexes given by elements[key + 2]. Unmapped
-  // arguments are stored as regular indexed properties in the arguments array,
-  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
-  // look at argument object construction.
-  //
-  // The sloppy arguments elements array has a special format:
-  //
-  // 0: context
-  // 1: unmapped arguments array
-  // 2: mapped_index0,
-  // 3: mapped_index1,
-  // ...
-  //
-  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
-  // If key + 2 >= elements.length then attempt to look in the unmapped
-  // arguments array (given by elements[1]) and return the value at key, missing
-  // to the runtime if the unmapped arguments array is not a fixed array or if
-  // key >= unmapped_arguments_array.length.
-  //
-  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
-  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
-  // index into the context array given at elements[0]. Return the value at
-  // context[t].
-
-  key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
-  IfBuilder positive_smi(this);
-  positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
-                                            Token::LT);
-  positive_smi.ThenDeopt("key is negative");
-  positive_smi.End();
-
-  HValue* constant_two = Add<HConstant>(2);
-  HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
-  HValue* elements_length =
-      Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
-  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
-  IfBuilder in_range(this);
-  in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
-  in_range.Then();
-  {
-    HValue* index = AddUncasted<HAdd>(key, constant_two);
-    HInstruction* mapped_index =
-        Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
-                        FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
-
-    IfBuilder is_valid(this);
-    is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
-                                              graph()->GetConstantHole());
-    is_valid.Then();
-    {
-      // TODO(mvstanton): I'd like to assert from this point, that if the
-      // mapped_index is not the hole that it is indeed, a smi. An unnecessary
-      // smi check is being emitted.
-      HValue* the_context =
-          Add<HLoadKeyed>(elements, graph()->GetConstant0(),
-                          static_cast<HValue*>(NULL), FAST_ELEMENTS);
-      DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
-      HValue* result =
-          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
-                          FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-      environment()->Push(result);
-    }
-    is_valid.Else();
-    {
-      HValue* result = UnmappedCase(elements, key);
-      environment()->Push(result);
-    }
-    is_valid.End();
-  }
-  in_range.Else();
-  {
-    HValue* result = UnmappedCase(elements, key);
-    environment()->Push(result);
-  }
-  in_range.End();
-
-  return environment()->Pop();
-}
-
-
-Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
-  return DoGenerateCode(this);
-}


 void CodeStubGraphBuilderBase::BuildStoreNamedField(
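
The comment block deleted in the hunk above documents the two-tier layout
behind sloppy-mode arguments objects: mapped slots alias formal parameters
through the context, while unmapped entries live in a plain backing array at
elements[1]. A minimal JavaScript sketch of the aliasing the removed stub
handled (illustration only, not part of the patch):

  function f(a, b) {
    // Mapped: in sloppy mode, arguments[0] aliases the formal "a".
    arguments[0] = 42;
    console.log(a);             // 42
    // Unmapped: index 2 has no corresponding formal, so it is stored as
    // a regular indexed property on the arguments object.
    arguments[2] = "extra";
    console.log(arguments[2]);  // "extra"
  }
  f(1, 2);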
@@ -1210,6 +1092,7 @@
 template <>
 HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
   StoreGlobalStub* stub = casted_stub();
+  Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
   Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
   Handle<PropertyCell> placeholder_cell =
       isolate()->factory()->NewPropertyCell(placeholer_value);
@@ -1241,7 +1124,7 @@
     // property has been deleted and that the store must be handled by the
     // runtime.
     IfBuilder builder(this);
-    HValue* hole_value = graph()->GetConstantHole();
+    HValue* hole_value = Add<HConstant>(hole);
     builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
     builder.Then();
     builder.Deopt("Unexpected cell contents in global store");
=======================================
--- /trunk/src/code-stubs.cc    Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/code-stubs.cc    Tue Sep 23 12:44:49 2014 UTC
@@ -586,14 +586,12 @@
 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   if (kind() == Code::STORE_IC) {
     descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
   }
 }


 CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
-  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+  if (kind() == Code::LOAD_IC) {
     return LoadDescriptor(isolate());
   } else {
     DCHECK_EQ(Code::STORE_IC, kind());
=======================================
--- /trunk/src/code-stubs.h     Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/code-stubs.h     Tue Sep 23 12:44:49 2014 UTC
@@ -82,7 +82,6 @@
   /* IC Handler stubs */                    \
   V(LoadConstant)                           \
   V(LoadField)                              \
-  V(KeyedLoadSloppyArguments)               \
   V(StoreField)                             \
   V(StoreGlobal)                            \
   V(StringLength)
@@ -915,20 +914,6 @@
 };


-class KeyedLoadSloppyArgumentsStub : public HandlerStub {
- public:
-  explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
-      : HandlerStub(isolate) {}
-
- protected:
-  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
-  virtual Code::StubType GetStubType() { return Code::FAST; }
-
- private:
-  DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
-};
-
-
 class LoadConstantStub : public HandlerStub {
  public:
   LoadConstantStub(Isolate* isolate, int constant_index)
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Tue Sep 23 12:44:49 2014 UTC
@@ -136,8 +136,13 @@
   ArmOperandConverter i(this, instr);

   switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArchCallCodeObject: {
-      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -151,7 +156,6 @@
       break;
     }
     case kArchCallJSFunction: {
-      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -165,6 +169,13 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArchDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      DCHECK_LT(0, words);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArchJmp:
       __ b(code_->GetLabel(i.InputBlock(0)));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -844,27 +855,6 @@
 void CodeGenerator::AddNopForSmiCodeInlining() {
   // On 32-bit ARM we do not insert nops for inlined Smi code.
 }
-
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
-  int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    int current_pc = masm()->pc_offset();
-    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
-      // Block literal pool emission for duration of padding.
-      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
-      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
-      while (padding_size > 0) {
-        __ nop();
-        padding_size -= v8::internal::Assembler::kInstrSize;
-      }
-    }
-  }
-  MarkLazyDeoptSite();
-}

 #undef __

=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Sep 23 12:44:49 2014 UTC
@@ -68,8 +68,10 @@
       case kArmStrh:
         return value >= -255 && value <= 255;

+      case kArchCallAddress:
       case kArchCallCodeObject:
       case kArchCallJSFunction:
+      case kArchDrop:
       case kArchJmp:
       case kArchNop:
       case kArchRet:
@@ -801,6 +803,9 @@
       opcode = kArchCallCodeObject;
       break;
     }
+    case CallDescriptor::kCallAddress:
+      opcode = kArchCallAddress;
+      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -820,6 +825,13 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
+
+  // Caller clean up of stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      !buffer.pushed_nodes.empty()) {
+    DCHECK(deoptimization == NULL && continuation == NULL);
+    Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
+  }
 }


=======================================
--- /trunk/src/compiler/arm64/code-generator-arm64.cc Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/arm64/code-generator-arm64.cc Tue Sep 23 12:44:49 2014 UTC
@@ -131,8 +131,12 @@
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      break;
+    }
     case kArchCallCodeObject: {
-      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -145,7 +149,6 @@
       break;
     }
     case kArchCallJSFunction: {
-      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -160,6 +163,11 @@
       AddSafepointAndDeopt(instr);
       break;
     }
+    case kArchDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      break;
+    }
     case kArchJmp:
       __ B(code_->GetLabel(i.InputBlock(0)));
       break;
@@ -845,29 +853,6 @@


 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
-
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
-  int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    intptr_t current_pc = masm()->pc_offset();
-
-    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
-      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      DCHECK((padding_size % kInstructionSize) == 0);
-      InstructionAccurateScope instruction_accurate(
-          masm(), padding_size / kInstructionSize);
-
-      while (padding_size > 0) {
-        __ nop();
-        padding_size -= kInstructionSize;
-      }
-    }
-  }
-  MarkLazyDeoptSite();
-}

 #undef __

=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Tue Sep 23 12:44:49 2014 UTC
@@ -30,26 +30,6 @@
 std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
   return os << mi.constructor_name;
 }
-
-
-// Helper to build Int32Constant or Int64Constant depending on the given
-// machine type.
-Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
-                    int64_t value) {
-  switch (type) {
-    case kMachInt32:
-      return m.Int32Constant(value);
-      break;
-
-    case kMachInt64:
-      return m.Int64Constant(value);
-      break;
-
-    default:
-      UNIMPLEMENTED();
-  }
-  return NULL;
-}


 // ARM64 logical instructions.
@@ -306,13 +286,13 @@
   const MachineType type = dpi.machine_type;
   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
     EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
     ASSERT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
 }
@@ -324,7 +304,7 @@

   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
     Stream s = m.Build();

     // Add can support an immediate on the left by commuting, but Sub can't
@@ -334,7 +314,7 @@
       EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
       ASSERT_EQ(2U, s[0]->InputCount());
       EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
       EXPECT_EQ(1U, s[0]->OutputCount());
     }
   }
@@ -1024,35 +1004,38 @@
 TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
   const MachInst2 cmp = GetParam();
   const MachineType type = cmp.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  // TODO(all): Add support for testing 64-bit immediates.
+  if (type == kMachInt32) {
+    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+      // Compare with 0 are turned into tst instruction.
+      if (imm == 0) continue;
+      StreamBuilder m(this, type, type);
+      m.Return((m.*cmp.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kEqual, s[0]->flags_condition());
+    }
+    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+      // Compare with 0 are turned into tst instruction.
+      if (imm == 0) continue;
+      StreamBuilder m(this, type, type);
+      m.Return((m.*cmp.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kEqual, s[0]->flags_condition());
+    }
   }
 }

=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Sep 23 12:44:49 2014 UTC
@@ -37,13 +37,9 @@
   }

   bool CanBeImmediate(Node* node, ImmediateMode mode) {
-    int64_t value;
-    if (node->opcode() == IrOpcode::kInt32Constant)
-      value = OpParameter<int32_t>(node);
-    else if (node->opcode() == IrOpcode::kInt64Constant)
-      value = OpParameter<int64_t>(node);
-    else
-      return false;
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int64_t value = m.Value();
     unsigned ignored;
     switch (mode) {
       case kLogical32Imm:
@@ -111,12 +107,11 @@


 // Shared routine for multiple binary operations.
-template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, ImmediateMode operand_mode,
                        FlagsContinuation* cont) {
   Arm64OperandGenerator g(selector);
-  Matcher m(node);
+  Int32BinopMatcher m(node);
   InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
@@ -147,11 +142,10 @@


 // Shared routine for multiple binary operations.
-template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        ArchOpcode opcode, ImmediateMode operand_mode) {
   FlagsContinuation cont;
-  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+  VisitBinop(selector, node, opcode, operand_mode, &cont);
 }


@@ -268,22 +262,22 @@


 void InstructionSelector::VisitWord32And(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
+  VisitBinop(this, node, kArm64And32, kLogical32Imm);
 }


 void InstructionSelector::VisitWord64And(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
+  VisitBinop(this, node, kArm64And, kLogical64Imm);
 }


 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
+  VisitBinop(this, node, kArm64Or32, kLogical32Imm);
 }


 void InstructionSelector::VisitWord64Or(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
+  VisitBinop(this, node, kArm64Or, kLogical64Imm);
 }


@@ -293,7 +287,7 @@
   if (m.right().Is(-1)) {
     Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
+    VisitBinop(this, node, kArm64Xor32, kLogical32Imm);
   }
 }

@@ -304,7 +298,7 @@
   if (m.right().Is(-1)) {
     Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical32Imm);
+    VisitBinop(this, node, kArm64Xor, kLogical32Imm);
   }
 }

@@ -350,12 +344,12 @@


 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
+  VisitBinop(this, node, kArm64Add32, kArithmeticImm);
 }


 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
+  VisitBinop(this, node, kArm64Add, kArithmeticImm);
 }


@@ -366,7 +360,7 @@
     Emit(kArm64Neg32, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
+    VisitBinop(this, node, kArm64Sub32, kArithmeticImm);
   }
 }

@@ -377,7 +371,7 @@
   if (m.left().Is(0)) {
     Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
+    VisitBinop(this, node, kArm64Sub, kArithmeticImm);
   }
 }

@@ -508,13 +502,13 @@

 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
+  VisitBinop(this, node, kArm64Add32, kArithmeticImm, cont);
 }


 void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
+  VisitBinop(this, node, kArm64Sub32, kArithmeticImm, cont);
 }


@@ -630,8 +624,12 @@
   InitializeCallBuffer(call, &buffer, true, false);

   // Push the arguments to the stack.
+  bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
   bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
   int aligned_push_count = buffer.pushed_nodes.size();
+  if (is_c_frame && pushed_count_uneven) {
+    aligned_push_count++;
+  }
   // TODO(dcarney): claim and poke probably take small immediates,
   //                loop here or whatever.
   // Bump the stack pointer(s).
@@ -646,7 +644,8 @@
     // Emit the uneven pushes.
     if (pushed_count_uneven) {
       Node* input = buffer.pushed_nodes[slot];
-      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
+      ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
+      Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
       slot--;
     }
     // Now all pushes can be done in pairs.
@@ -664,6 +663,9 @@
       opcode = kArchCallCodeObject;
       break;
     }
+    case CallDescriptor::kCallAddress:
+      opcode = kArchCallAddress;
+      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -683,6 +685,12 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
+
+  // Caller clean up of stack for C-style calls.
+  if (is_c_frame && aligned_push_count > 0) {
+    DCHECK(deoptimization == NULL && continuation == NULL);
+    Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL);
+  }
 }

 }  // namespace compiler
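
For readers new to TurboFan's node matchers: the CanBeImmediate() hunk above replaces hand-written opcode checks with the matcher idiom. Below is a minimal standalone sketch of that idiom; the Node/Opcode types and the 12-bit immediate range are illustrative stand-ins, not V8's actual classes.

// A toy model of the matcher pattern: wrap a node, report whether it is a
// constant of the expected kind, and expose the value if so.
#include <cstdint>
#include <iostream>

enum class Opcode { kInt32Constant, kOther };

struct Node {
  Opcode opcode;
  int64_t raw_value;  // Only meaningful for constant opcodes.
};

class Int32Matcher {
 public:
  explicit Int32Matcher(const Node* node) : node_(node) {}
  bool HasValue() const { return node_->opcode == Opcode::kInt32Constant; }
  int32_t Value() const { return static_cast<int32_t>(node_->raw_value); }

 private:
  const Node* node_;
};

// Mirrors the shape of CanBeImmediate() after the patch: bail out unless the
// node is a matching constant, then range-check the value.
bool CanBeArithmeticImmediate(const Node* node) {
  Int32Matcher m(node);
  if (!m.HasValue()) return false;
  int64_t value = m.Value();
  return value >= 0 && value < (1 << 12);  // Hypothetical 12-bit immediate.
}

int main() {
  Node imm{Opcode::kInt32Constant, 42};
  Node big{Opcode::kInt32Constant, 1 << 20};
  Node other{Opcode::kOther, 0};
  std::cout << CanBeArithmeticImmediate(&imm) << " "      // 1
            << CanBeArithmeticImmediate(&big) << " "      // 0
            << CanBeArithmeticImmediate(&other) << "\n";  // 0
}
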
=======================================
--- /trunk/src/compiler/code-generator.cc       Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/code-generator.cc       Tue Sep 23 12:44:49 2014 UTC
@@ -21,8 +21,7 @@
       safepoints_(code->zone()),
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
-      translations_(code->zone()),
-      last_lazy_deopt_pc_(0) {}
+      translations_(code->zone()) {}


 Handle<Code> CodeGenerator::GenerateCode() {
@@ -243,7 +242,6 @@
   }

   if (needs_frame_state) {
-    MarkLazyDeoptSite();
     // If the frame state is present, it starts at argument 1
     // (just after the code address).
     InstructionOperandConverter converter(this, instr);
@@ -389,7 +387,8 @@
             isolate()->factory()->NewNumberFromInt(constant.ToInt32());
         break;
       case Constant::kFloat64:
-        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
+        constant_object =
+            isolate()->factory()->NewHeapNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
         constant_object = constant.ToHeapObject();
@@ -403,11 +402,6 @@
     UNREACHABLE();
   }
 }
-
-
-void CodeGenerator::MarkLazyDeoptSite() {
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-}

 #if !V8_TURBOFAN_BACKEND
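
This file (together with the ia32 code generator below) drops the last_lazy_deopt_pc_ bookkeeping. For reference, here is a standalone sketch of the arithmetic the removed padding logic performed; the offsets and patch size are toy values standing in for masm()->pc_offset() and Deoptimizer::patch_size().

#include <algorithm>
#include <iostream>

// Nop bytes needed so the previous lazy-deopt site keeps patch_size bytes of
// room before the next call site is emitted.
int PaddingNeeded(int last_lazy_deopt_pc, int current_pc, int patch_size) {
  return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
}

int main() {
  std::cout << PaddingNeeded(100, 102, 5) << "\n";  // 3: pad before emitting
  std::cout << PaddingNeeded(100, 110, 5) << "\n";  // 0: already far enough
}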

=======================================
--- /trunk/src/compiler/code-generator.h        Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/code-generator.h        Tue Sep 23 12:44:49 2014 UTC
@@ -98,10 +98,8 @@
   void AddTranslationForOperand(Translation* translation, Instruction* instr,
                                 InstructionOperand* op);
   void AddNopForSmiCodeInlining();
-  void EnsureSpaceForLazyDeopt();
-  void MarkLazyDeoptSite();
+  // ===========================================================================

-  // ===========================================================================
   struct DeoptimizationState : ZoneObject {
    public:
     BailoutId bailout_id() const { return bailout_id_; }
@@ -128,7 +126,6 @@
   ZoneDeque<DeoptimizationState*> deoptimization_states_;
   ZoneDeque<Handle<Object> > deoptimization_literals_;
   TranslationBuffer translations_;
-  int last_lazy_deopt_pc_;
 };

 }  // namespace compiler
=======================================
--- /trunk/src/compiler/common-operator-unittest.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/common-operator-unittest.cc Tue Sep 23 12:44:49 2014 UTC
@@ -4,8 +4,6 @@

 #include "src/compiler/common-operator.h"

-#include <limits>
-
 #include "src/compiler/operator-properties-inl.h"
 #include "src/test/test-utils.h"

@@ -134,26 +132,9 @@

 const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};

-const float kFloat32Values[] = {
-    std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
-    std::numeric_limits<float>::max()};
-
 }  // namespace


-TEST_F(CommonOperatorTest, Float32Constant) {
-  TRACED_FOREACH(float, value, kFloat32Values) {
-    const Operator* op = common()->Float32Constant(value);
-    EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
-    EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
-    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  }
-}
-
-
 TEST_F(CommonOperatorTest, ValueEffect) {
   TRACED_FOREACH(int, arguments, kArguments) {
     const Operator* op = common()->ValueEffect(arguments);
=======================================
--- /trunk/src/compiler/common-operator.cc      Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/common-operator.cc      Tue Sep 23 12:44:49 2014 UTC
@@ -135,13 +135,6 @@
   return new (zone()) Operator1<int64_t>(
       IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant", value);
 }
-
-
-const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
-  return new (zone())
-      Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
-                       "Float32Constant", value);
-}


const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
=======================================
--- /trunk/src/compiler/common-operator.h       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/common-operator.h       Tue Sep 23 12:44:49 2014 UTC
@@ -84,7 +84,6 @@

   const Operator* Int32Constant(int32_t);
   const Operator* Int64Constant(int64_t);
-  const Operator* Float32Constant(volatile float);
   const Operator* Float64Constant(volatile double);
   const Operator* ExternalConstant(const ExternalReference&);
   const Operator* NumberConstant(volatile double);
=======================================
--- /trunk/src/compiler/graph-unittest.cc       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/graph-unittest.cc       Tue Sep 23 12:44:49 2014 UTC
@@ -44,12 +44,7 @@
 }


-Node* GraphTest::Float32Constant(volatile float value) {
-  return graph()->NewNode(common()->Float32Constant(value));
-}
-
-
-Node* GraphTest::Float64Constant(volatile double value) {
+Node* GraphTest::Float64Constant(double value) {
   return graph()->NewNode(common()->Float64Constant(value));
 }

@@ -64,7 +59,7 @@
 }


-Node* GraphTest::NumberConstant(volatile double value) {
+Node* GraphTest::NumberConstant(double value) {
   return graph()->NewNode(common()->NumberConstant(value));
 }

@@ -667,12 +662,6 @@
   return MakeMatcher(
       new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
 }
-
-
-Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
-}


 Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
=======================================
--- /trunk/src/compiler/graph-unittest.h        Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/graph-unittest.h        Tue Sep 23 12:44:49 2014 UTC
@@ -31,11 +31,10 @@

  protected:
   Node* Parameter(int32_t index);
-  Node* Float32Constant(volatile float value);
-  Node* Float64Constant(volatile double value);
+  Node* Float64Constant(double value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
-  Node* NumberConstant(volatile double value);
+  Node* NumberConstant(double value);
   Node* HeapConstant(const Unique<HeapObject>& value);
   Node* FalseConstant();
   Node* TrueConstant();
@@ -66,7 +65,6 @@
     const Matcher<ExternalReference>& value_matcher);
 Matcher<Node*> IsHeapConstant(
     const Matcher<Unique<HeapObject> >& value_matcher);
-Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
 Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
 Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
 Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Sep 23 12:44:49 2014 UTC
@@ -111,8 +111,16 @@
   IA32OperandConverter i(this, instr);

   switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallAddress:
+      if (HasImmediateInput(instr, 0)) {
+        // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
+        __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
+                RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ call(i.InputRegister(0));
+      }
+      break;
     case kArchCallCodeObject: {
-      EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ call(code, RelocInfo::CODE_TARGET);
@@ -124,7 +132,6 @@
       break;
     }
     case kArchCallJSFunction: {
-      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -135,6 +142,11 @@
       AddSafepointAndDeopt(instr);
       break;
     }
+    case kArchDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ add(esp, Immediate(kPointerSize * words));
+      break;
+    }
     case kArchJmp:
       __ jmp(code()->GetLabel(i.InputBlock(0)));
       break;
@@ -933,21 +945,6 @@


 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
-  int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    int current_pc = masm()->pc_offset();
-    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
-      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      __ Nop(padding_size);
-    }
-  }
-  MarkLazyDeoptSite();
-}

 #undef __
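
The new kArchDrop case above decodes a word count out of the instruction and bumps esp by that many pointer-sized slots. A toy sketch of the encode/decode round trip follows; the bit layout and opcode value here are made up for illustration (V8's real MiscField is a BitField template).

#include <cstdint>
#include <iostream>

const int kPointerSize = 4;  // ia32

// Hypothetical layout: low 8 bits hold the arch opcode, the rest the payload.
uint32_t EncodeDrop(uint32_t arch_drop_opcode, uint32_t words) {
  return arch_drop_opcode | (words << 8);
}

int main() {
  uint32_t insn = EncodeDrop(/* kArchDrop = */ 3, /* words = */ 5);
  uint32_t words = insn >> 8;  // The MiscField::decode step.
  std::cout << "add esp, " << kPointerSize * words << "\n";  // add esp, 20
}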

=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Tue Sep 23 12:44:49 2014 UTC
@@ -531,6 +531,9 @@
       opcode = kArchCallCodeObject;
       break;
     }
+    case CallDescriptor::kCallAddress:
+      opcode = kArchCallAddress;
+      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -550,6 +553,13 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
+
+  // Caller clean up of stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      buffer.pushed_nodes.size() > 0) {
+    DCHECK(deoptimization == NULL && continuation == NULL);
+    Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
+  }
 }

 }  // namespace compiler
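
Together with the arm64 hunk above, this gives C-style calls caller cleanup: the selector records how many argument slots were claimed and emits kArchDrop for that count after the call, and arm64 additionally rounds an odd push count up to keep the stack pointer 16-byte aligned. A small standalone sketch of that counting logic, with illustrative values:

#include <cstddef>
#include <iostream>

// For C frames, round an odd push count up to even (models the arm64
// 16-byte stack alignment rule); JS frames are left untouched.
size_t AlignedPushCount(size_t pushed, bool is_c_frame) {
  bool uneven = pushed & 1;
  size_t aligned = pushed;
  if (is_c_frame && uneven) aligned++;
  return aligned;
}

int main() {
  // A C call pushing 3 arguments claims 4 slots; kArchDrop then pops 4.
  std::cout << AlignedPushCount(3, true) << "\n";   // 4
  std::cout << AlignedPushCount(3, false) << "\n";  // 3
}
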
=======================================
--- /trunk/src/compiler/instruction-codes.h     Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/instruction-codes.h     Tue Sep 23 12:44:49 2014 UTC
@@ -29,8 +29,10 @@
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define ARCH_OPCODE_LIST(V) \
+  V(ArchCallAddress)        \
   V(ArchCallCodeObject)     \
   V(ArchCallJSFunction)     \
+  V(ArchDrop)               \
   V(ArchJmp)                \
   V(ArchNop)                \
   V(ArchRet)                \
=======================================
--- /trunk/src/compiler/instruction-selector-unittest.h Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/instruction-selector-unittest.h Tue Sep 23 12:44:49 2014 UTC
@@ -146,10 +146,6 @@
     int32_t ToInt32(const InstructionOperand* operand) const {
       return ToConstant(operand).ToInt32();
     }
-
-    int64_t ToInt64(const InstructionOperand* operand) const {
-      return ToConstant(operand).ToInt64();
-    }

     int ToVreg(const InstructionOperand* operand) const {
       if (operand->IsConstant()) return operand->index();
=======================================
--- /trunk/src/compiler/js-builtin-reducer.cc   Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.cc   Tue Sep 23 12:44:49 2014 UTC
@@ -34,49 +34,34 @@
   // constant callee being a well-known builtin with a BuiltinFunctionId.
   bool HasBuiltinFunctionId() {
     if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
-    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
-    if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
-    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
-    return function->shared()->HasBuiltinFunctionId();
+    HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
+    return m.HasValue() && m.Value().handle()->shared()->HasBuiltinFunctionId();
   }

   // Retrieves the BuiltinFunctionId as described above.
   BuiltinFunctionId GetBuiltinFunctionId() {
     DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
-    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
-    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
-    return function->shared()->builtin_function_id();
+    HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
+    return m.Value().handle()->shared()->builtin_function_id();
   }
-
-  // Determines whether the call takes zero inputs.
-  bool InputsMatchZero() { return GetJSCallArity() == 0; }

   // Determines whether the call takes one input of the given type.
-  bool InputsMatchOne(Type* t1) {
+  bool InputsMatch(Type* t1) {
     return GetJSCallArity() == 1 &&
            NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
   }

   // Determines whether the call takes two inputs of the given types.
-  bool InputsMatchTwo(Type* t1, Type* t2) {
+  bool InputsMatch(Type* t1, Type* t2) {
     return GetJSCallArity() == 2 &&
            NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
            NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
   }
-
-  // Determines whether the call takes inputs all of the given type.
-  bool InputsMatchAll(Type* t) {
-    for (int i = 0; i < GetJSCallArity(); i++) {
-      if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
-        return false;
-      }
-    }
-    return true;
-  }

   Node* left() { return GetJSCallInput(0); }
   Node* right() { return GetJSCallInput(1); }

+ protected:
   int GetJSCallArity() {
     DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
@@ -93,44 +78,12 @@
  private:
   Node* node_;
 };
-
-
-// ECMA-262, section 15.8.2.11.
-Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchZero()) {
-    // Math.max() -> -Infinity
-    return Replace(jsgraph()->Constant(-V8_INFINITY));
-  }
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.max(a:number) -> a
-    return Replace(r.left());
-  }
-  if (r.InputsMatchAll(Type::Integral32())) {
-    // Math.max(a:int32, b:int32, ...)
-    Node* value = r.GetJSCallInput(0);
-    for (int i = 1; i < r.GetJSCallArity(); i++) {
-      Node* p = r.GetJSCallInput(i);
-      Node* control = graph()->start();
-      Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
-
-      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
-      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
-    }
-    return Replace(value);
-  }
-  return NoChange();
-}


 // ES6 draft 08-24-14, section 20.2.2.19.
 Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
+  if (r.InputsMatch(Type::Integral32(), Type::Integral32())) {
     // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
     Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
     return Replace(value);
@@ -145,8 +98,6 @@
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
-    case kMathMax:
-      return ReplaceWithPureReduction(node, ReduceMathMax(node));
     case kMathImul:
       return ReplaceWithPureReduction(node, ReduceMathImul(node));
     default:
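
The reduction above also collapses InputsMatchOne/InputsMatchTwo into a single overloaded InputsMatch, selected by arity. A standalone sketch of that shape, with a toy type enum standing in for V8's Type lattice:

#include <iostream>
#include <vector>

enum class Ty { kNumber, kInt32 };

struct Call {
  std::vector<Ty> args;

  // One name, two overloads; the argument count picks the right check.
  bool InputsMatch(Ty t1) const { return args.size() == 1 && args[0] == t1; }
  bool InputsMatch(Ty t1, Ty t2) const {
    return args.size() == 2 && args[0] == t1 && args[1] == t2;
  }
};

int main() {
  Call imul{{Ty::kInt32, Ty::kInt32}};
  std::cout << imul.InputsMatch(Ty::kInt32, Ty::kInt32) << "\n";  // 1
  std::cout << imul.InputsMatch(Ty::kNumber) << "\n";             // 0
}
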
=======================================
--- /trunk/src/compiler/js-builtin-reducer.h    Tue Sep 23 10:40:53 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.h    Tue Sep 23 12:44:49 2014 UTC
@@ -24,13 +24,11 @@
   virtual Reduction Reduce(Node* node) OVERRIDE;

  private:
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Graph* graph() const { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
-  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
+  Graph* graph() { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }

-  Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);

   JSGraph* jsgraph_;
=======================================
--- /trunk/src/compiler/js-graph.h      Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/js-graph.h      Tue Sep 23 12:44:49 2014 UTC
@@ -65,9 +65,6 @@

   // Creates a Int32Constant node, usually canonicalized.
   Node* Int32Constant(int32_t value);
-  Node* Uint32Constant(uint32_t value) {
-    return Int32Constant(bit_cast<int32_t>(value));
-  }

   // Creates a Float64Constant node, usually canonicalized.
   Node* Float64Constant(double value);
@@ -112,7 +109,6 @@

   Factory* factory() { return isolate()->factory(); }
 };
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
=======================================
--- /trunk/src/compiler/js-typed-lowering.cc    Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/js-typed-lowering.cc    Tue Sep 23 12:44:49 2014 UTC
@@ -571,14 +571,13 @@
   // TODO(mstarzinger): This lowering is not correct if:
   //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
   //   b) The typed array or it's buffer is neutered.
+  //   c) The index is out of bounds
   if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
       base_type->AsConstant()->Value()->IsJSTypedArray()) {
     // JSStoreProperty(typed-array, int32, value)
     JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
     ElementsKind elements_kind = array->map()->elements_kind();
     ExternalArrayType type = array->type();
-    uint32_t length;
-    CHECK(array->length()->ToUint32(&length));
     ElementAccess element_access;
     Node* elements = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
@@ -592,24 +591,11 @@
       DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
       element_access = AccessBuilder::ForTypedArrayElement(type, false);
     }
-
-    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
-                                   jsgraph()->Uint32Constant(length));
-    Node* branch = graph()->NewNode(common()->Branch(), check,
-                                    NodeProperties::GetControlInput(node));
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* store = graph()->NewNode(
-        simplified()->StoreElement(element_access), elements, key, value,
-        NodeProperties::GetEffectInput(node), if_true);
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
-    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
-                                 NodeProperties::GetEffectInput(node), merge);
-
-    return ReplaceWith(phi);
+    Node* store =
+        graph()->NewNode(simplified()->StoreElement(element_access), elements,
+                         key, value, NodeProperties::GetEffectInput(node),
+                         NodeProperties::GetControlInput(node));
+    return ReplaceEagerly(node, store);
   }
   return NoChange();
 }
=======================================
--- /trunk/src/compiler/machine-operator-reducer-unittest.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer-unittest.cc Tue Sep 23 12:44:49 2014 UTC
@@ -46,43 +46,6 @@

 namespace {

-static const float kFloat32Values[] = {
-    -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
-    -1.22813e+35f,                           -1.20555e+35f, -1.34584e+34f,
-    -1.0079e+32f,                            -6.49364e+26f, -3.06077e+25f,
-    -1.46821e+25f,                           -1.17658e+23f, -1.9617e+22f,
-    -2.7357e+20f,                            -1.48708e+13f, -1.89633e+12f,
-    -4.66622e+11f,                           -2.22581e+11f, -1.45381e+10f,
-    -1.3956e+09f,                            -1.32951e+09f, -1.30721e+09f,
-    -1.19756e+09f,                           -9.26822e+08f, -6.35647e+08f,
-    -4.00037e+08f,                           -1.81227e+08f, -5.09256e+07f,
-    -964300.0f,                              -192446.0f,    -28455.0f,
-    -27194.0f,                               -26401.0f,     -20575.0f,
-    -17069.0f,                               -9167.0f,      -960.178f,
-    -113.0f,                                 -62.0f,        -15.0f,
-    -7.0f,                                   -0.0256635f,   -4.60374e-07f,
-    -3.63759e-10f,                           -4.30175e-14f, -5.27385e-15f,
-    -1.48084e-15f,                           -1.05755e-19f, -3.2995e-21f,
-    -1.67354e-23f,                           -1.11885e-23f, -1.78506e-30f,
-    -5.07594e-31f,                           -3.65799e-31f, -1.43718e-34f,
-    -1.27126e-38f,                           -0.0f,         0.0f,
-    1.17549e-38f,                            1.56657e-37f,  4.08512e-29f,
-    3.31357e-28f,                            6.25073e-22f,  4.1723e-13f,
-    1.44343e-09f,                            5.27004e-08f,  9.48298e-08f,
-    5.57888e-07f,                            4.89988e-05f,  0.244326f,
-    12.4895f,                                19.0f,         47.0f,
-    106.0f,                                  538.324f,      564.536f,
-    819.124f,                                7048.0f,       12611.0f,
-    19878.0f,                                20309.0f,      797056.0f,
-    1.77219e+09f,                            1.51116e+11f,  4.18193e+13f,
-    3.59167e+16f,                            3.38211e+19f,  2.67488e+20f,
-    1.78831e+21f,                            9.20914e+21f,  8.35654e+23f,
-    1.4495e+24f,                             5.94015e+25f,  4.43608e+30f,
-    2.44502e+33f,                            2.61152e+33f,  1.38178e+37f,
-    1.71306e+37f,                            3.31899e+38f,  3.40282e+38f,
-    std::numeric_limits<float>::infinity()};
-
-
 static const double kFloat64Values[] = {
     -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
     -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
@@ -202,7 +165,7 @@
 namespace {

 struct UnaryOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)();
+  const Operator* (MachineOperatorBuilder::*constructor)() const;
   const char* constructor_name;
 };

@@ -240,20 +203,6 @@
 INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
                         MachineUnaryOperatorReducerTest,
                         ::testing::ValuesIn(kUnaryOperators));
-
-
-// -----------------------------------------------------------------------------
-// ChangeFloat64ToFloat32
-
-
-TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) {
-  TRACED_FOREACH(float, x, kFloat32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
-  }
-}


// -----------------------------------------------------------------------------
@@ -363,31 +312,6 @@
                 IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
   }
 }
-
-
-// -----------------------------------------------------------------------------
-// TruncateFloat64ToFloat32
-
-
-TEST_F(MachineOperatorReducerTest,
-       TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->TruncateFloat64ToFloat32(),
-      graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
-  TRACED_FOREACH(double, x, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
-  }
-}


// -----------------------------------------------------------------------------
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc Tue Sep 23 12:44:49 2014 UTC
@@ -19,11 +19,6 @@


 MachineOperatorReducer::~MachineOperatorReducer() {}
-
-
-Node* MachineOperatorReducer::Float32Constant(volatile float value) {
-  return graph()->NewNode(common()->Float32Constant(value));
-}


 Node* MachineOperatorReducer::Float64Constant(volatile double value) {
@@ -388,11 +383,6 @@
       }
       break;
     }
-    case IrOpcode::kChangeFloat32ToFloat64: {
-      Float32Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(m.Value());
-      break;
-    }
     case IrOpcode::kChangeFloat64ToInt32: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
@@ -437,12 +427,6 @@
       if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
       break;
     }
-    case IrOpcode::kTruncateFloat64ToFloat32: {
-      Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
-      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
-      break;
-    }
     // TODO(turbofan): strength-reduce and fold floating point operations.
     default:
       break;
=======================================
--- /trunk/src/compiler/machine-operator-reducer.h Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.h Tue Sep 23 12:44:49 2014 UTC
@@ -27,15 +27,11 @@
   virtual Reduction Reduce(Node* node) OVERRIDE;

  private:
-  Node* Float32Constant(volatile float value);
   Node* Float64Constant(volatile double value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);

   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
-  Reduction ReplaceFloat32(volatile float value) {
-    return Replace(Float32Constant(value));
-  }
   Reduction ReplaceFloat64(volatile double value) {
     return Replace(Float64Constant(value));
   }
=======================================
--- /trunk/src/compiler/machine-operator-unittest.cc Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator-unittest.cc Tue Sep 23 12:44:49 2014 UTC
@@ -169,7 +169,7 @@
 namespace {

 struct PureOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)();
+  const Operator* (MachineOperatorBuilder::*constructor)() const;
   IrOpcode::Value opcode;
   int value_input_count;
   int value_output_count;
@@ -187,33 +187,32 @@
     &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
         output_count                                               \
   }
-    PURE(Word32And, 2, 1),                PURE(Word32Or, 2, 1),
-    PURE(Word32Xor, 2, 1),                PURE(Word32Shl, 2, 1),
-    PURE(Word32Shr, 2, 1),                PURE(Word32Sar, 2, 1),
-    PURE(Word32Ror, 2, 1),                PURE(Word32Equal, 2, 1),
-    PURE(Word64And, 2, 1),                PURE(Word64Or, 2, 1),
-    PURE(Word64Xor, 2, 1),                PURE(Word64Shl, 2, 1),
-    PURE(Word64Shr, 2, 1),                PURE(Word64Sar, 2, 1),
-    PURE(Word64Ror, 2, 1),                PURE(Word64Equal, 2, 1),
-    PURE(Int32Add, 2, 1),                 PURE(Int32AddWithOverflow, 2, 2),
-    PURE(Int32Sub, 2, 1),                 PURE(Int32SubWithOverflow, 2, 2),
-    PURE(Int32Mul, 2, 1),                 PURE(Int32Div, 2, 1),
-    PURE(Int32UDiv, 2, 1),                PURE(Int32Mod, 2, 1),
-    PURE(Int32UMod, 2, 1),                PURE(Int32LessThan, 2, 1),
-    PURE(Int32LessThanOrEqual, 2, 1),     PURE(Uint32LessThan, 2, 1),
-    PURE(Uint32LessThanOrEqual, 2, 1),    PURE(Int64Add, 2, 1),
-    PURE(Int64Sub, 2, 1),                 PURE(Int64Mul, 2, 1),
-    PURE(Int64Div, 2, 1),                 PURE(Int64UDiv, 2, 1),
-    PURE(Int64Mod, 2, 1),                 PURE(Int64UMod, 2, 1),
-    PURE(Int64LessThan, 2, 1),            PURE(Int64LessThanOrEqual, 2, 1),
-    PURE(ChangeFloat32ToFloat64, 1, 1),   PURE(ChangeFloat64ToInt32, 1, 1),
-    PURE(ChangeFloat64ToUint32, 1, 1),    PURE(ChangeInt32ToInt64, 1, 1),
-    PURE(ChangeUint32ToFloat64, 1, 1),    PURE(ChangeUint32ToUint64, 1, 1),
-    PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
-    PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
-    PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
-    PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
-    PURE(Float64Equal, 2, 1),             PURE(Float64LessThan, 2, 1),
+    PURE(Word32And, 2, 1),             PURE(Word32Or, 2, 1),
+    PURE(Word32Xor, 2, 1),             PURE(Word32Shl, 2, 1),
+    PURE(Word32Shr, 2, 1),             PURE(Word32Sar, 2, 1),
+    PURE(Word32Ror, 2, 1),             PURE(Word32Equal, 2, 1),
+    PURE(Word64And, 2, 1),             PURE(Word64Or, 2, 1),
+    PURE(Word64Xor, 2, 1),             PURE(Word64Shl, 2, 1),
+    PURE(Word64Shr, 2, 1),             PURE(Word64Sar, 2, 1),
+    PURE(Word64Ror, 2, 1),             PURE(Word64Equal, 2, 1),
+    PURE(Int32Add, 2, 1),              PURE(Int32AddWithOverflow, 2, 2),
+    PURE(Int32Sub, 2, 1),              PURE(Int32SubWithOverflow, 2, 2),
+    PURE(Int32Mul, 2, 1),              PURE(Int32Div, 2, 1),
+    PURE(Int32UDiv, 2, 1),             PURE(Int32Mod, 2, 1),
+    PURE(Int32UMod, 2, 1),             PURE(Int32LessThan, 2, 1),
+    PURE(Int32LessThanOrEqual, 2, 1),  PURE(Uint32LessThan, 2, 1),
+    PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1),
+    PURE(Int64Sub, 2, 1),              PURE(Int64Mul, 2, 1),
+    PURE(Int64Div, 2, 1),              PURE(Int64UDiv, 2, 1),
+    PURE(Int64Mod, 2, 1),              PURE(Int64UMod, 2, 1),
+    PURE(Int64LessThan, 2, 1),         PURE(Int64LessThanOrEqual, 2, 1),
+    PURE(ChangeFloat64ToInt32, 1, 1),  PURE(ChangeFloat64ToUint32, 1, 1),
+    PURE(ChangeInt32ToInt64, 1, 1),    PURE(ChangeUint32ToFloat64, 1, 1),
+    PURE(ChangeUint32ToUint64, 1, 1),  PURE(TruncateFloat64ToInt32, 1, 1),
+    PURE(TruncateInt64ToInt32, 1, 1),  PURE(Float64Add, 2, 1),
+    PURE(Float64Sub, 2, 1),            PURE(Float64Mul, 2, 1),
+    PURE(Float64Div, 2, 1),            PURE(Float64Mod, 2, 1),
+    PURE(Float64Equal, 2, 1),          PURE(Float64LessThan, 2, 1),
     PURE(Float64LessThanOrEqual, 2, 1)
 #undef PURE
 };
=======================================
--- /trunk/src/compiler/machine-operator.cc     Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator.cc     Tue Sep 23 12:44:49 2014 UTC
@@ -97,14 +97,12 @@
   V(Int64UMod, Operator::kNoProperties, 2, 1)                \
   V(Int64LessThan, Operator::kNoProperties, 2, 1)            \
   V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1)     \
-  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1)   \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1)     \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1)    \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1)     \
   V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1)       \
   V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1)    \
   V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1)     \
-  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1) \
   V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1)   \
   V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1)     \
   V(Float64Add, Operator::kCommutative, 2, 1)                \
@@ -196,12 +194,14 @@


 #define PURE(Name, properties, input_count, output_count) \
-  const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
+  const Operator* MachineOperatorBuilder::Name() const {  \
+    return &impl_.k##Name;                                \
+  }
 PURE_OP_LIST(PURE)
 #undef PURE


-const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) const {
   switch (rep) {
 #define LOAD(Type) \
   case k##Type:    \
@@ -217,7 +217,7 @@
 }


-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) const {
   switch (rep.machine_type()) {
 #define STORE(Type)                                     \
   case k##Type:                                         \
=======================================
--- /trunk/src/compiler/machine-operator.h      Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/machine-operator.h      Tue Sep 23 12:44:49 2014 UTC
@@ -62,84 +62,84 @@
  public:
   explicit MachineOperatorBuilder(MachineType word = kMachPtr);

-  const Operator* Word32And();
-  const Operator* Word32Or();
-  const Operator* Word32Xor();
-  const Operator* Word32Shl();
-  const Operator* Word32Shr();
-  const Operator* Word32Sar();
-  const Operator* Word32Ror();
-  const Operator* Word32Equal();
+  const Operator* Word32And() const WARN_UNUSED_RESULT;
+  const Operator* Word32Or() const WARN_UNUSED_RESULT;
+  const Operator* Word32Xor() const WARN_UNUSED_RESULT;
+  const Operator* Word32Shl() const WARN_UNUSED_RESULT;
+  const Operator* Word32Shr() const WARN_UNUSED_RESULT;
+  const Operator* Word32Sar() const WARN_UNUSED_RESULT;
+  const Operator* Word32Ror() const WARN_UNUSED_RESULT;
+  const Operator* Word32Equal() const WARN_UNUSED_RESULT;

-  const Operator* Word64And();
-  const Operator* Word64Or();
-  const Operator* Word64Xor();
-  const Operator* Word64Shl();
-  const Operator* Word64Shr();
-  const Operator* Word64Sar();
-  const Operator* Word64Ror();
-  const Operator* Word64Equal();
+  const Operator* Word64And() const WARN_UNUSED_RESULT;
+  const Operator* Word64Or() const WARN_UNUSED_RESULT;
+  const Operator* Word64Xor() const WARN_UNUSED_RESULT;
+  const Operator* Word64Shl() const WARN_UNUSED_RESULT;
+  const Operator* Word64Shr() const WARN_UNUSED_RESULT;
+  const Operator* Word64Sar() const WARN_UNUSED_RESULT;
+  const Operator* Word64Ror() const WARN_UNUSED_RESULT;
+  const Operator* Word64Equal() const WARN_UNUSED_RESULT;

-  const Operator* Int32Add();
-  const Operator* Int32AddWithOverflow();
-  const Operator* Int32Sub();
-  const Operator* Int32SubWithOverflow();
-  const Operator* Int32Mul();
-  const Operator* Int32Div();
-  const Operator* Int32UDiv();
-  const Operator* Int32Mod();
-  const Operator* Int32UMod();
-  const Operator* Int32LessThan();
-  const Operator* Int32LessThanOrEqual();
-  const Operator* Uint32LessThan();
-  const Operator* Uint32LessThanOrEqual();
+  const Operator* Int32Add() const WARN_UNUSED_RESULT;
+  const Operator* Int32AddWithOverflow() const WARN_UNUSED_RESULT;
+  const Operator* Int32Sub() const WARN_UNUSED_RESULT;
+  const Operator* Int32SubWithOverflow() const WARN_UNUSED_RESULT;
+  const Operator* Int32Mul() const WARN_UNUSED_RESULT;
+  const Operator* Int32Div() const WARN_UNUSED_RESULT;
+  const Operator* Int32UDiv() const WARN_UNUSED_RESULT;
+  const Operator* Int32Mod() const WARN_UNUSED_RESULT;
+  const Operator* Int32UMod() const WARN_UNUSED_RESULT;
+  const Operator* Int32LessThan() const WARN_UNUSED_RESULT;
+  const Operator* Int32LessThanOrEqual() const WARN_UNUSED_RESULT;
+  const Operator* Uint32LessThan() const WARN_UNUSED_RESULT;
+  const Operator* Uint32LessThanOrEqual() const WARN_UNUSED_RESULT;

-  const Operator* Int64Add();
-  const Operator* Int64Sub();
-  const Operator* Int64Mul();
-  const Operator* Int64Div();
-  const Operator* Int64UDiv();
-  const Operator* Int64Mod();
-  const Operator* Int64UMod();
-  const Operator* Int64LessThan();
-  const Operator* Int64LessThanOrEqual();
+  const Operator* Int64Add() const WARN_UNUSED_RESULT;
+  const Operator* Int64Sub() const WARN_UNUSED_RESULT;
+  const Operator* Int64Mul() const WARN_UNUSED_RESULT;
+  const Operator* Int64Div() const WARN_UNUSED_RESULT;
+  const Operator* Int64UDiv() const WARN_UNUSED_RESULT;
+  const Operator* Int64Mod() const WARN_UNUSED_RESULT;
+  const Operator* Int64UMod() const WARN_UNUSED_RESULT;
+  const Operator* Int64LessThan() const WARN_UNUSED_RESULT;
+  const Operator* Int64LessThanOrEqual() const WARN_UNUSED_RESULT;
+
+  // Convert representation of integers between float64 and int32/uint32.
+  // The precise rounding mode and handling of out of range inputs are *not*
+  // defined for these operators, since they are intended only for use with
+  // integers.
+  const Operator* ChangeInt32ToFloat64() const WARN_UNUSED_RESULT;
+  const Operator* ChangeUint32ToFloat64() const WARN_UNUSED_RESULT;
+  const Operator* ChangeFloat64ToInt32() const WARN_UNUSED_RESULT;
+  const Operator* ChangeFloat64ToUint32() const WARN_UNUSED_RESULT;
+
+  // Sign/zero extend int32/uint32 to int64/uint64.
+  const Operator* ChangeInt32ToInt64() const WARN_UNUSED_RESULT;
+  const Operator* ChangeUint32ToUint64() const WARN_UNUSED_RESULT;

-  // These operators change the representation of numbers while preserving the
-  // value of the number. Narrowing operators assume the input is representable
-  // in the target type and are *not* defined for other inputs.
-  // Use narrowing change operators only when there is a static guarantee that
-  // the input value is representable in the target value.
-  const Operator* ChangeFloat32ToFloat64();
-  const Operator* ChangeFloat64ToInt32();   // narrowing
-  const Operator* ChangeFloat64ToUint32();  // narrowing
-  const Operator* ChangeInt32ToFloat64();
-  const Operator* ChangeInt32ToInt64();
-  const Operator* ChangeUint32ToFloat64();
-  const Operator* ChangeUint32ToUint64();
+  // Truncate double to int32 using JavaScript semantics.
+  const Operator* TruncateFloat64ToInt32() const WARN_UNUSED_RESULT;

-  // These operators truncate numbers, both changing the representation of
-  // the number and mapping multiple input values onto the same output value.
-  const Operator* TruncateFloat64ToFloat32();
-  const Operator* TruncateFloat64ToInt32();  // JavaScript semantics.
-  const Operator* TruncateInt64ToInt32();
+  // Truncate the high order bits and convert the remaining bits to int32.
+  const Operator* TruncateInt64ToInt32() const WARN_UNUSED_RESULT;

   // Floating point operators always operate with IEEE 754 round-to-nearest.
-  const Operator* Float64Add();
-  const Operator* Float64Sub();
-  const Operator* Float64Mul();
-  const Operator* Float64Div();
-  const Operator* Float64Mod();
+  const Operator* Float64Add() const WARN_UNUSED_RESULT;
+  const Operator* Float64Sub() const WARN_UNUSED_RESULT;
+  const Operator* Float64Mul() const WARN_UNUSED_RESULT;
+  const Operator* Float64Div() const WARN_UNUSED_RESULT;
+  const Operator* Float64Mod() const WARN_UNUSED_RESULT;

   // Floating point comparisons complying to IEEE 754.
-  const Operator* Float64Equal();
-  const Operator* Float64LessThan();
-  const Operator* Float64LessThanOrEqual();
+  const Operator* Float64Equal() const WARN_UNUSED_RESULT;
+  const Operator* Float64LessThan() const WARN_UNUSED_RESULT;
+  const Operator* Float64LessThanOrEqual() const WARN_UNUSED_RESULT;

   // load [base + index]
-  const Operator* Load(LoadRepresentation rep);
+  const Operator* Load(LoadRepresentation rep) const WARN_UNUSED_RESULT;

   // store [base + index], value
-  const Operator* Store(StoreRepresentation rep);
+  const Operator* Store(StoreRepresentation rep) const WARN_UNUSED_RESULT;

   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == kRepWord32; }
@@ -167,7 +167,7 @@
   V(Int, LessThan)        \
   V(Int, LessThanOrEqual)
 #define PSEUDO_OP(Prefix, Suffix)                                \
-  const Operator* Prefix##Suffix() {                             \
+  const Operator* Prefix##Suffix() const {                       \
     return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
   }
   PSEUDO_OP_LIST(PSEUDO_OP)
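
With the WARN_UNUSED_RESULT annotations above, calling an operator accessor and discarding the result now draws a compiler warning on GCC and Clang. A self-contained sketch of the effect; the macro definition here mirrors the usual pattern and is an assumption, not a copy of V8's own macro:

#if defined(__GNUC__)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

struct Operator {};

const Operator* Int32Add() WARN_UNUSED_RESULT;
const Operator* Int32Add() { static Operator op; return &op; }

int main() {
  Int32Add();  // warning on GCC/Clang: ignoring return value of 'Int32Add'
  const Operator* op = Int32Add();  // OK: the result is consumed.
  (void)op;
}
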
=======================================
--- /trunk/src/compiler/node-matchers.h Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/node-matchers.h Tue Sep 23 12:44:49 2014 UTC
@@ -90,7 +90,6 @@
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
 };

-typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
 typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher;
 typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;

=======================================
--- /trunk/src/compiler/opcodes.h       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/opcodes.h       Tue Sep 23 12:44:49 2014 UTC
@@ -25,7 +25,6 @@
 #define LEAF_OP_LIST(V) \
   V(Int32Constant)      \
   V(Int64Constant)      \
-  V(Float32Constant)    \
   V(Float64Constant)    \
   V(ExternalConstant)   \
   V(NumberConstant)     \
@@ -162,64 +161,62 @@
   V(StoreElement)

 // Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V)    \
-  V(Load)                     \
-  V(Store)                    \
-  V(Word32And)                \
-  V(Word32Or)                 \
-  V(Word32Xor)                \
-  V(Word32Shl)                \
-  V(Word32Shr)                \
-  V(Word32Sar)                \
-  V(Word32Ror)                \
-  V(Word32Equal)              \
-  V(Word64And)                \
-  V(Word64Or)                 \
-  V(Word64Xor)                \
-  V(Word64Shl)                \
-  V(Word64Shr)                \
-  V(Word64Sar)                \
-  V(Word64Ror)                \
-  V(Word64Equal)              \
-  V(Int32Add)                 \
-  V(Int32AddWithOverflow)     \
-  V(Int32Sub)                 \
-  V(Int32SubWithOverflow)     \
-  V(Int32Mul)                 \
-  V(Int32Div)                 \
-  V(Int32UDiv)                \
-  V(Int32Mod)                 \
-  V(Int32UMod)                \
-  V(Int32LessThan)            \
-  V(Int32LessThanOrEqual)     \
-  V(Uint32LessThan)           \
-  V(Uint32LessThanOrEqual)    \
-  V(Int64Add)                 \
-  V(Int64Sub)                 \
-  V(Int64Mul)                 \
-  V(Int64Div)                 \
-  V(Int64UDiv)                \
-  V(Int64Mod)                 \
-  V(Int64UMod)                \
-  V(Int64LessThan)            \
-  V(Int64LessThanOrEqual)     \
-  V(ChangeFloat32ToFloat64)   \
-  V(ChangeFloat64ToInt32)     \
-  V(ChangeFloat64ToUint32)    \
-  V(ChangeInt32ToFloat64)     \
-  V(ChangeInt32ToInt64)       \
-  V(ChangeUint32ToFloat64)    \
-  V(ChangeUint32ToUint64)     \
-  V(TruncateFloat64ToFloat32) \
-  V(TruncateFloat64ToInt32)   \
-  V(TruncateInt64ToInt32)     \
-  V(Float64Add)               \
-  V(Float64Sub)               \
-  V(Float64Mul)               \
-  V(Float64Div)               \
-  V(Float64Mod)               \
-  V(Float64Equal)             \
-  V(Float64LessThan)          \
+#define MACHINE_OP_LIST(V)  \
+  V(Load)                   \
+  V(Store)                  \
+  V(Word32And)              \
+  V(Word32Or)               \
+  V(Word32Xor)              \
+  V(Word32Shl)              \
+  V(Word32Shr)              \
+  V(Word32Sar)              \
+  V(Word32Ror)              \
+  V(Word32Equal)            \
+  V(Word64And)              \
+  V(Word64Or)               \
+  V(Word64Xor)              \
+  V(Word64Shl)              \
+  V(Word64Shr)              \
+  V(Word64Sar)              \
+  V(Word64Ror)              \
+  V(Word64Equal)            \
+  V(Int32Add)               \
+  V(Int32AddWithOverflow)   \
+  V(Int32Sub)               \
+  V(Int32SubWithOverflow)   \
+  V(Int32Mul)               \
+  V(Int32Div)               \
+  V(Int32UDiv)              \
+  V(Int32Mod)               \
+  V(Int32UMod)              \
+  V(Int32LessThan)          \
+  V(Int32LessThanOrEqual)   \
+  V(Uint32LessThan)         \
+  V(Uint32LessThanOrEqual)  \
+  V(Int64Add)               \
+  V(Int64Sub)               \
+  V(Int64Mul)               \
+  V(Int64Div)               \
+  V(Int64UDiv)              \
+  V(Int64Mod)               \
+  V(Int64UMod)              \
+  V(Int64LessThan)          \
+  V(Int64LessThanOrEqual)   \
+  V(ChangeInt32ToFloat64)   \
+  V(ChangeUint32ToFloat64)  \
+  V(ChangeFloat64ToInt32)   \
+  V(ChangeFloat64ToUint32)  \
+  V(ChangeInt32ToInt64)     \
+  V(ChangeUint32ToUint64)   \
+  V(TruncateFloat64ToInt32) \
+  V(TruncateInt64ToInt32)   \
+  V(Float64Add)             \
+  V(Float64Sub)             \
+  V(Float64Mul)             \
+  V(Float64Div)             \
+  V(Float64Mod)             \
+  V(Float64Equal)           \
+  V(Float64LessThan)        \
   V(Float64LessThanOrEqual)

 #define VALUE_OP_LIST(V) \
=======================================
--- /trunk/src/compiler/pipeline.cc     Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/pipeline.cc     Tue Sep 23 12:44:49 2014 UTC
@@ -269,14 +269,13 @@
       SourcePositionTable::Scope pos(&source_positions,
                                      SourcePosition::Unknown());
       Linkage linkage(info());
-      // TODO(turbofan): Value numbering disabled for now.
-      // ValueNumberingReducer vn_reducer(zone());
+      ValueNumberingReducer vn_reducer(zone());
       SimplifiedOperatorReducer simple_reducer(&jsgraph);
       ChangeLowering lowering(&jsgraph, &linkage);
       MachineOperatorReducer mach_reducer(&jsgraph);
       GraphReducer graph_reducer(&graph);
       // TODO(titzer): Figure out if we should run all reducers at once here.
-      // graph_reducer.AddReducer(&vn_reducer);
+      graph_reducer.AddReducer(&vn_reducer);
       graph_reducer.AddReducer(&simple_reducer);
       graph_reducer.AddReducer(&lowering);
       graph_reducer.AddReducer(&mach_reducer);
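
Re-enabling the ValueNumberingReducer means structurally identical pure nodes are folded to a single representative during reduction. A toy sketch of the idea, keying nodes by (operator, inputs) in an ordinary map; V8's implementation is a hash-based cache over the graph, so this is only the concept:

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Key = std::pair<std::string, std::vector<int> >;

struct ValueNumberer {
  std::map<Key, int> seen;
  int next_id = 0;

  // Returns the canonical node id for (op, inputs), creating one if new.
  int Reduce(const std::string& op, const std::vector<int>& inputs) {
    Key key(op, inputs);
    std::map<Key, int>::iterator it = seen.find(key);
    if (it != seen.end()) return it->second;  // Fold onto the earlier node.
    int id = next_id++;
    seen[key] = id;
    return id;
  }
};

int main() {
  ValueNumberer vn;
  int a = vn.Reduce("Parameter", {0});
  int b = vn.Reduce("Parameter", {1});
  int add1 = vn.Reduce("Int32Add", {a, b});
  int add2 = vn.Reduce("Int32Add", {a, b});  // Structurally identical...
  std::cout << (add1 == add2) << "\n";       // ...numbered to the same node: 1
}
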
=======================================
--- /trunk/src/compiler/raw-machine-assembler.h Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/compiler/raw-machine-assembler.h Tue Sep 23 12:44:49 2014 UTC
@@ -5,6 +5,12 @@
 #ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
 #define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_

+#ifdef USE_SIMULATOR
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
+#else
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
+#endif
+
 #include "src/v8.h"

 #include "src/compiler/common-operator.h"
@@ -368,6 +374,21 @@
   Node* TruncateInt64ToInt32(Node* a) {
     return NewNode(machine()->TruncateInt64ToInt32(), a);
   }
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+  // Call to C.
+  Node* CallC(Node* function_address, MachineType return_type,
+              MachineType* arg_types, Node** args, int n_args) {
+    CallDescriptor* descriptor =
+        Linkage::GetSimplifiedCDescriptor(zone(), machine_sig());
+    Node** passed_args = zone()->NewArray<Node*>(n_args + 1);
+    passed_args[0] = function_address;
+    for (int i = 0; i < n_args; ++i) {
+      passed_args[i + 1] = args[i];
+    }
+    return NewNode(common()->Call(descriptor), n_args + 1, passed_args);
+  }
+#endif

   // Parameters.
   Node* Parameter(size_t index);
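
The CallC() helper above mainly packs the callee address in front of the argument nodes before building the call node. A standalone sketch of just that packing step, with a toy Node type and the linkage/descriptor details omitted:

#include <iostream>
#include <vector>

struct Node { int id; };

// Builds the call node's input list: [target, arg0, ..., argN-1].
std::vector<Node*> PackCallInputs(Node* function_address, Node** args,
                                  int n_args) {
  std::vector<Node*> passed_args(n_args + 1);
  passed_args[0] = function_address;
  for (int i = 0; i < n_args; ++i) passed_args[i + 1] = args[i];
  return passed_args;
}

int main() {
  Node target{0}, a{1}, b{2};
  Node* args[] = {&a, &b};
  std::vector<Node*> inputs = PackCallInputs(&target, args, 2);
  for (Node* n : inputs) std::cout << n->id << " ";  // 0 1 2
  std::cout << "\n";
}
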
=======================================
***Additional files exist in this changeset.***
