Revision: 23959
Author: [email protected]
Date: Tue Sep 16 07:50:38 2014 UTC
Log: Version 3.29.66 (based on bleeding_edge revision r23957)
Currently, a new isolate is created in an uninitialized state, and several
API methods will automatically initialize it. During this uninitialized
state, code event handlers and function entry handlers can be attached to
the isolate.
Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=23959
Added:
/trunk/src/third_party/vtune/DEPS
/trunk/test/mjsunit/regress/string-compare-memcmp.js
Deleted:
/trunk/tools/run_benchmarks.py
/trunk/tools/unittests/run_benchmarks_test.py
Modified:
/trunk/BUILD.gn
/trunk/ChangeLog
/trunk/PRESUBMIT.py
/trunk/include/v8.h
/trunk/src/api.cc
/trunk/src/ast.cc
/trunk/src/ast.h
/trunk/src/base/build_config.h
/trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc
/trunk/src/compiler/ast-graph-builder.cc
/trunk/src/compiler/code-generator.cc
/trunk/src/compiler/code-generator.h
/trunk/src/compiler/linkage.cc
/trunk/src/compiler/scheduler.cc
/trunk/src/compiler/simplified-operator-reducer-unittest.cc
/trunk/src/compiler/simplified-operator-unittest.cc
/trunk/src/compiler/simplified-operator.cc
/trunk/src/compiler/simplified-operator.h
/trunk/src/compiler/typer.cc
/trunk/src/d8.cc
/trunk/src/deoptimizer.cc
/trunk/src/deoptimizer.h
/trunk/src/heap/gc-idle-time-handler-unittest.cc
/trunk/src/heap/gc-idle-time-handler.cc
/trunk/src/heap/heap.cc
/trunk/src/objects.cc
/trunk/src/objects.h
/trunk/src/regexp-macro-assembler-irregexp.h
/trunk/src/regexp-macro-assembler.cc
/trunk/src/regexp-macro-assembler.h
/trunk/src/runtime.cc
/trunk/src/safepoint-table.cc
/trunk/src/safepoint-table.h
/trunk/src/snapshot-source-sink.cc
/trunk/src/third_party/vtune/v8-vtune.h
/trunk/src/third_party/vtune/vtune-jit.cc
/trunk/src/utils.h
/trunk/src/version.cc
/trunk/test/benchmarks/benchmarks.status
/trunk/test/benchmarks/testcfg.py
/trunk/test/cctest/cctest.status
/trunk/test/cctest/compiler/function-tester.h
/trunk/test/cctest/compiler/test-js-typed-lowering.cc
/trunk/test/cctest/test-api.cc
/trunk/test/mjsunit/mjsunit.status
/trunk/test/mjsunit/regress/poly_count_operation.js
/trunk/test/mozilla/mozilla.status
/trunk/test/webkit/webkit.status
/trunk/tools/run_perf.py
/trunk/tools/unittests/run_perf_test.py
=======================================
--- /dev/null
+++ /trunk/src/third_party/vtune/DEPS Tue Sep 16 07:50:38 2014 UTC
@@ -0,0 +1,3 @@
+include_rules = [
+ "+../../../include",
+]
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/string-compare-memcmp.js Tue Sep 16
07:50:38 2014 UTC
@@ -0,0 +1,7 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
=======================================
--- /trunk/tools/run_benchmarks.py Fri Aug 29 00:04:38 2014 UTC
+++ /dev/null
@@ -1,488 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Performance runner for d8.
-
-Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
-
-The suite json format is expected to be:
-{
- "path": <relative path chunks to benchmark resources and main file>,
- "name": <optional suite name, file name is default>,
- "archs": [<architecture name for which this suite is run>, ...],
- "binary": <name of binary to run, default "d8">,
- "flags": [<flag to d8>, ...],
- "run_count": <how often will this suite run (optional)>,
- "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
- "resources": [<js file to be loaded before main>, ...]
- "main": <main js benchmark runner file>,
- "results_regexp": <optional regexp>,
- "results_processor": <optional python results processor script>,
- "units": <the unit specification for the performance dashboard>,
- "benchmarks": [
- {
- "name": <name of the benchmark>,
- "results_regexp": <optional more specific regexp>,
- "results_processor": <optional python results processor script>,
- "units": <the unit specification for the performance dashboard>,
- }, ...
- ]
-}
-
-The benchmarks field can also nest other suites in arbitrary depth. A suite
-with a "main" file is a leaf suite that can contain one more level of
-benchmarks.
-
-A suite's results_regexp is expected to have one string place holder
-"%s" for the benchmark name. A benchmark's results_regexp overwrites suite
-defaults.
-
-A suite's results_processor may point to an optional python script. If
-specified, it is called after running the benchmarks like this (with a path
-relatve to the suite level's path):
-<results_processor file> <same flags as for d8> <suite level name> <output>
-
-The <output> is a temporary file containing d8 output. The results_regexp
will
-be applied to the output of this script.
-
-A suite without "benchmarks" is considered a benchmark itself.
-
-Full example (suite with one runner):
-{
- "path": ["."],
- "flags": ["--expose-gc"],
- "archs": ["ia32", "x64"],
- "run_count": 5,
- "run_count_ia32": 3,
- "main": "run.js",
- "results_regexp": "^%s: (.+)$",
- "units": "score",
- "benchmarks": [
- {"name": "Richards"},
- {"name": "DeltaBlue"},
- {"name": "NavierStokes",
- "results_regexp": "^NavierStokes: (.+)$"}
- ]
-}
-
-Full example (suite with several runners):
-{
- "path": ["."],
- "flags": ["--expose-gc"],
- "archs": ["ia32", "x64"],
- "run_count": 5,
- "units": "score",
- "benchmarks": [
- {"name": "Richards",
- "path": ["richards"],
- "main": "run.js",
- "run_count": 3,
- "results_regexp": "^Richards: (.+)$"},
- {"name": "NavierStokes",
- "path": ["navier_stokes"],
- "main": "run.js",
- "results_regexp": "^NavierStokes: (.+)$"}
- ]
-}
-
-Path pieces are concatenated. D8 is always run with the suite's path as
cwd.
-"""
-
-import json
-import math
-import optparse
-import os
-import re
-import sys
-
-from testrunner.local import commands
-from testrunner.local import utils
-
-ARCH_GUESS = utils.DefaultArch()
-SUPPORTED_ARCHS = ["android_arm",
- "android_arm64",
- "android_ia32",
- "arm",
- "ia32",
- "mips",
- "mipsel",
- "nacl_ia32",
- "nacl_x64",
- "x64",
- "arm64"]
-
-GENERIC_RESULTS_RE = re.compile(
- r"^Trace\(([^\)]+)\), Result\(([^\)]+)\), StdDev\(([^\)]+)\)$")
-
-
-def GeometricMean(values):
- """Returns the geometric mean of a list of values.
-
- The mean is calculated using log to avoid overflow.
- """
- values = map(float, values)
- return str(math.exp(sum(map(math.log, values)) / len(values)))
-
-
-class Results(object):
- """Place holder for result traces."""
- def __init__(self, traces=None, errors=None):
- self.traces = traces or []
- self.errors = errors or []
-
- def ToDict(self):
- return {"traces": self.traces, "errors": self.errors}
-
- def WriteToFile(self, file_name):
- with open(file_name, "w") as f:
- f.write(json.dumps(self.ToDict()))
-
- def __add__(self, other):
- self.traces += other.traces
- self.errors += other.errors
- return self
-
- def __str__(self): # pragma: no cover
- return str(self.ToDict())
-
-
-class Node(object):
- """Represents a node in the benchmark suite tree structure."""
- def __init__(self, *args):
- self._children = []
-
- def AppendChild(self, child):
- self._children.append(child)
-
-
-class DefaultSentinel(Node):
- """Fake parent node with all default values."""
- def __init__(self):
- super(DefaultSentinel, self).__init__()
- self.binary = "d8"
- self.run_count = 10
- self.path = []
- self.graphs = []
- self.flags = []
- self.resources = []
- self.results_regexp = None
- self.stddev_regexp = None
- self.units = "score"
- self.total = False
-
-
-class Graph(Node):
- """Represents a benchmark suite definition.
-
- Can either be a leaf or an inner node that provides default values.
- """
- def __init__(self, suite, parent, arch):
- super(Graph, self).__init__()
- self._suite = suite
-
- assert isinstance(suite.get("path", []), list)
- assert isinstance(suite["name"], basestring)
- assert isinstance(suite.get("flags", []), list)
- assert isinstance(suite.get("resources", []), list)
-
- # Accumulated values.
- self.path = parent.path[:] + suite.get("path", [])
- self.graphs = parent.graphs[:] + [suite["name"]]
- self.flags = parent.flags[:] + suite.get("flags", [])
- self.resources = parent.resources[:] + suite.get("resources", [])
-
- # Descrete values (with parent defaults).
- self.binary = suite.get("binary", parent.binary)
- self.run_count = suite.get("run_count", parent.run_count)
- self.run_count = suite.get("run_count_%s" % arch, self.run_count)
- self.units = suite.get("units", parent.units)
- self.total = suite.get("total", parent.total)
-
- # A regular expression for results. If the parent graph provides a
- # regexp and the current suite has none, a string place holder for the
- # suite name is expected.
- # TODO(machenbach): Currently that makes only sense for the leaf level.
- # Multiple place holders for multiple levels are not supported.
- if parent.results_regexp:
- regexp_default = parent.results_regexp % re.escape(suite["name"])
- else:
- regexp_default = None
- self.results_regexp = suite.get("results_regexp", regexp_default)
-
- # A similar regular expression for the standard deviation (optional).
- if parent.stddev_regexp:
- stddev_default = parent.stddev_regexp % re.escape(suite["name"])
- else:
- stddev_default = None
- self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
-
-
-class Trace(Graph):
- """Represents a leaf in the benchmark suite tree structure.
-
- Handles collection of measurements.
- """
- def __init__(self, suite, parent, arch):
- super(Trace, self).__init__(suite, parent, arch)
- assert self.results_regexp
- self.results = []
- self.errors = []
- self.stddev = ""
-
- def ConsumeOutput(self, stdout):
- try:
- self.results.append(
- re.search(self.results_regexp, stdout, re.M).group(1))
- except:
- self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
- % (self.results_regexp, self.graphs[-1]))
-
- try:
- if self.stddev_regexp and self.stddev:
- self.errors.append("Benchmark %s should only run once since a
stddev "
- "is provided by the benchmark." %
self.graphs[-1])
- if self.stddev_regexp:
- self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
- except:
- self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
- % (self.stddev_regexp, self.graphs[-1]))
-
- def GetResults(self):
- return Results([{
- "graphs": self.graphs,
- "units": self.units,
- "results": self.results,
- "stddev": self.stddev,
- }], self.errors)
-
-
-class Runnable(Graph):
- """Represents a runnable benchmark suite definition (i.e. has a main
file).
- """
- @property
- def main(self):
- return self._suite.get("main", "")
-
- def ChangeCWD(self, suite_path):
- """Changes the cwd to to path defined in the current graph.
-
- The benchmarks are supposed to be relative to the suite configuration.
- """
- suite_dir = os.path.abspath(os.path.dirname(suite_path))
- bench_dir = os.path.normpath(os.path.join(*self.path))
- os.chdir(os.path.join(suite_dir, bench_dir))
-
- def GetCommand(self, shell_dir):
- # TODO(machenbach): This requires +.exe if run on windows.
- return (
- [os.path.join(shell_dir, self.binary)] +
- self.flags +
- self.resources +
- [self.main]
- )
-
- def Run(self, runner):
- """Iterates over several runs and handles the output for all traces."""
- for stdout in runner():
- for trace in self._children:
- trace.ConsumeOutput(stdout)
- res = reduce(lambda r, t: r + t.GetResults(), self._children,
Results())
-
- if not res.traces or not self.total:
- return res
-
- # Assume all traces have the same structure.
- if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
- res.errors.append("Not all traces have the same number of results.")
- return res
-
- # Calculate the geometric means for all traces. Above we made sure that
- # there is at least one trace and that the number of results is the
same
- # for each trace.
- n_results = len(res.traces[0]["results"])
- total_results = [GeometricMean(t["results"][i] for t in res.traces)
- for i in range(0, n_results)]
- res.traces.append({
- "graphs": self.graphs + ["Total"],
- "units": res.traces[0]["units"],
- "results": total_results,
- "stddev": "",
- })
- return res
-
-class RunnableTrace(Trace, Runnable):
- """Represents a runnable benchmark suite definition that is a leaf."""
- def __init__(self, suite, parent, arch):
- super(RunnableTrace, self).__init__(suite, parent, arch)
-
- def Run(self, runner):
- """Iterates over several runs and handles the output."""
- for stdout in runner():
- self.ConsumeOutput(stdout)
- return self.GetResults()
-
-
-class RunnableGeneric(Runnable):
- """Represents a runnable benchmark suite definition with generic
traces."""
- def __init__(self, suite, parent, arch):
- super(RunnableGeneric, self).__init__(suite, parent, arch)
-
- def Run(self, runner):
- """Iterates over several runs and handles the output."""
- traces = {}
- for stdout in runner():
- for line in stdout.strip().splitlines():
- match = GENERIC_RESULTS_RE.match(line)
- if match:
- trace = match.group(1)
- result = match.group(2)
- stddev = match.group(3)
- trace_result = traces.setdefault(trace, Results([{
- "graphs": self.graphs + [trace],
- "units": self.units,
- "results": [],
- "stddev": "",
- }], []))
- trace_result.traces[0]["results"].append(result)
- trace_result.traces[0]["stddev"] = stddev
-
- return reduce(lambda r, t: r + t, traces.itervalues(), Results())
-
-
-def MakeGraph(suite, arch, parent):
- """Factory method for making graph objects."""
- if isinstance(parent, Runnable):
- # Below a runnable can only be traces.
- return Trace(suite, parent, arch)
- elif suite.get("main"):
- # A main file makes this graph runnable.
- if suite.get("benchmarks"):
- # This graph has subbenchmarks (traces).
- return Runnable(suite, parent, arch)
- else:
- # This graph has no subbenchmarks, it's a leaf.
- return RunnableTrace(suite, parent, arch)
- elif suite.get("generic"):
- # This is a generic suite definition. It is either a runnable
executable
- # or has a main js file.
- return RunnableGeneric(suite, parent, arch)
- elif suite.get("benchmarks"):
- # This is neither a leaf nor a runnable.
- return Graph(suite, parent, arch)
- else: # pragma: no cover
- raise Exception("Invalid benchmark suite configuration.")
-
-
-def BuildGraphs(suite, arch, parent=None):
- """Builds a tree structure of graph objects that corresponds to the suite
- configuration.
- """
- parent = parent or DefaultSentinel()
-
- # TODO(machenbach): Implement notion of cpu type?
- if arch not in suite.get("archs", ["ia32", "x64"]):
- return None
-
- graph = MakeGraph(suite, arch, parent)
- for subsuite in suite.get("benchmarks", []):
- BuildGraphs(subsuite, arch, graph)
- parent.AppendChild(graph)
- return graph
-
-
-def FlattenRunnables(node):
- """Generator that traverses the tree structure and iterates over all
- runnables.
- """
- if isinstance(node, Runnable):
- yield node
- elif isinstance(node, Node):
- for child in node._children:
- for result in FlattenRunnables(child):
- yield result
- else: # pragma: no cover
- raise Exception("Invalid benchmark suite configuration.")
-
-
-# TODO: Implement results_processor.
-def Main(args):
- parser = optparse.OptionParser()
- parser.add_option("--arch",
- help=("The architecture to run tests for, "
- "'auto' or 'native' for auto-detect"),
- default="x64")
- parser.add_option("--buildbot",
- help="Adapt to path structure used on buildbots",
- default=False, action="store_true")
- parser.add_option("--json-test-results",
- help="Path to a file for storing json results.")
- parser.add_option("--outdir", help="Base directory with compile output",
- default="out")
- (options, args) = parser.parse_args(args)
-
- if len(args) == 0: # pragma: no cover
- parser.print_help()
- return 1
-
- if options.arch in ["auto", "native"]: # pragma: no cover
- options.arch = ARCH_GUESS
-
- if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
- print "Unknown architecture %s" % options.arch
- return 1
-
- workspace =
os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-
- if options.buildbot:
- shell_dir = os.path.join(workspace, options.outdir, "Release")
- else:
- shell_dir = os.path.join(workspace, options.outdir,
- "%s.release" % options.arch)
-
- results = Results()
- for path in args:
- path = os.path.abspath(path)
-
- if not os.path.exists(path): # pragma: no cover
- results.errors.append("Benchmark file %s does not exist." % path)
- continue
-
- with open(path) as f:
- suite = json.loads(f.read())
-
- # If no name is given, default to the file name without .json.
- suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
-
- for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
- print ">>> Running suite: %s" % "/".join(runnable.graphs)
- runnable.ChangeCWD(path)
-
- def Runner():
- """Output generator that reruns several times."""
- for i in xrange(0, max(1, runnable.run_count)):
- # TODO(machenbach): Make timeout configurable in the suite
definition.
- # Allow timeout per arch like with run_count per arch.
- output = commands.Execute(runnable.GetCommand(shell_dir),
timeout=60)
- print ">>> Stdout (#%d):" % (i + 1)
- print output.stdout
- if output.stderr: # pragma: no cover
- # Print stderr for debugging.
- print ">>> Stderr (#%d):" % (i + 1)
- print output.stderr
- yield output.stdout
-
- # Let runnable iterate over all runs and handle output.
- results += runnable.Run(Runner)
-
- if options.json_test_results:
- results.WriteToFile(options.json_test_results)
- else: # pragma: no cover
- print results
-
- return min(1, len(results.errors))
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(Main(sys.argv[1:]))
=======================================
--- /trunk/tools/unittests/run_benchmarks_test.py Fri Aug 29 00:04:38 2014
UTC
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from collections import namedtuple
-import coverage
-import json
-from mock import DEFAULT
-from mock import MagicMock
-import os
-from os import path, sys
-import shutil
-import tempfile
-import unittest
-
-# Requires python-coverage and python-mock. Native python coverage
-# version >= 3.7.1 should be installed to get the best speed.
-
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")
-
-V8_JSON = {
- "path": ["."],
- "binary": "d7",
- "flags": ["--flag"],
- "main": "run.js",
- "run_count": 1,
- "results_regexp": "^%s: (.+)$",
- "benchmarks": [
- {"name": "Richards"},
- {"name": "DeltaBlue"},
- ]
-}
-
-V8_NESTED_SUITES_JSON = {
- "path": ["."],
- "flags": ["--flag"],
- "run_count": 1,
- "units": "score",
- "benchmarks": [
- {"name": "Richards",
- "path": ["richards"],
- "binary": "d7",
- "main": "run.js",
- "resources": ["file1.js", "file2.js"],
- "run_count": 2,
- "results_regexp": "^Richards: (.+)$"},
- {"name": "Sub",
- "path": ["sub"],
- "benchmarks": [
- {"name": "Leaf",
- "path": ["leaf"],
- "run_count_x64": 3,
- "units": "ms",
- "main": "run.js",
- "results_regexp": "^Simple: (.+) ms.$"},
- ]
- },
- {"name": "DeltaBlue",
- "path": ["delta_blue"],
- "main": "run.js",
- "flags": ["--flag2"],
- "results_regexp": "^DeltaBlue: (.+)$"},
- {"name": "ShouldntRun",
- "path": ["."],
- "archs": ["arm"],
- "main": "run.js"},
- ]
-}
-
-V8_GENERIC_JSON = {
- "path": ["."],
- "binary": "cc",
- "flags": ["--flag"],
- "generic": True,
- "run_count": 1,
- "units": "ms",
-}
-
-Output = namedtuple("Output", "stdout, stderr")
-
-class BenchmarksTest(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.base = path.dirname(path.dirname(path.abspath(__file__)))
- sys.path.append(cls.base)
- cls._cov = coverage.coverage(
- include=([os.path.join(cls.base, "run_benchmarks.py")]))
- cls._cov.start()
- import run_benchmarks
- from testrunner.local import commands
- global commands
- global run_benchmarks
-
- @classmethod
- def tearDownClass(cls):
- cls._cov.stop()
- print ""
- print cls._cov.report()
-
- def setUp(self):
- self.maxDiff = None
- if path.exists(TEST_WORKSPACE):
- shutil.rmtree(TEST_WORKSPACE)
- os.makedirs(TEST_WORKSPACE)
-
- def tearDown(self):
- if path.exists(TEST_WORKSPACE):
- shutil.rmtree(TEST_WORKSPACE)
-
- def _WriteTestInput(self, json_content):
- self._test_input = path.join(TEST_WORKSPACE, "test.json")
- with open(self._test_input, "w") as f:
- f.write(json.dumps(json_content))
-
- def _MockCommand(self, *args):
- # Fake output for each benchmark run.
- benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in
args[1]]
- def execute(*args, **kwargs):
- return benchmark_outputs.pop()
- commands.Execute = MagicMock(side_effect=execute)
-
- # Check that d8 is called from the correct cwd for each benchmark run.
- dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
- def chdir(*args, **kwargs):
- self.assertEquals(dirs.pop(), args[0])
- os.chdir = MagicMock(side_effect=chdir)
-
- def _CallMain(self, *args):
- self._test_output = path.join(TEST_WORKSPACE, "results.json")
- all_args=[
- "--json-test-results",
- self._test_output,
- self._test_input,
- ]
- all_args += args
- return run_benchmarks.Main(all_args)
-
- def _LoadResults(self):
- with open(self._test_output) as f:
- return json.load(f)
-
- def _VerifyResults(self, suite, units, traces):
- self.assertEquals([
- {"units": units,
- "graphs": [suite, trace["name"]],
- "results": trace["results"],
- "stddev": trace["stddev"]} for trace in traces],
- self._LoadResults()["traces"])
-
- def _VerifyErrors(self, errors):
- self.assertEquals(errors, self._LoadResults()["errors"])
-
- def _VerifyMock(self, binary, *args):
- arg = [path.join(path.dirname(self.base), binary)]
- arg += args
- commands.Execute.assert_called_with(arg, timeout=60)
-
- def _VerifyMockMultiple(self, *args):
- expected = []
- for arg in args:
- a = [path.join(path.dirname(self.base), arg[0])]
- a += arg[1:]
- expected.append(((a,), {"timeout": 60}))
- self.assertEquals(expected, commands.Execute.call_args_list)
-
- def testOneRun(self):
- self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue:
10657567\ny\n"])
- self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
- ])
- self._VerifyErrors([])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testTwoRuns_Units_SuiteName(self):
- test_input = dict(V8_JSON)
- test_input["run_count"] = 2
- test_input["name"] = "v8"
- test_input["units"] = "ms"
- self._WriteTestInput(test_input)
- self._MockCommand([".", "."],
- ["Richards: 100\nDeltaBlue: 200\n",
- "Richards: 50\nDeltaBlue: 300\n"])
- self.assertEquals(0, self._CallMain())
- self._VerifyResults("v8", "ms", [
- {"name": "Richards", "results": ["50", "100"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
- ])
- self._VerifyErrors([])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testTwoRuns_SubRegexp(self):
- test_input = dict(V8_JSON)
- test_input["run_count"] = 2
- del test_input["results_regexp"]
- test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
- test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
- self._WriteTestInput(test_input)
- self._MockCommand([".", "."],
- ["Richards: 100\nDeltaBlue: 200\n",
- "Richards: 50\nDeltaBlue: 300\n"])
- self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["50", "100"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
- ])
- self._VerifyErrors([])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testNestedSuite(self):
- self._WriteTestInput(V8_NESTED_SUITES_JSON)
- self._MockCommand(["delta_blue", "sub/leaf", "richards"],
- ["DeltaBlue: 200\n",
- "Simple: 1 ms.\n",
- "Simple: 2 ms.\n",
- "Simple: 3 ms.\n",
- "Richards: 100\n",
- "Richards: 50\n"])
- self.assertEquals(0, self._CallMain())
- self.assertEquals([
- {"units": "score",
- "graphs": ["test", "Richards"],
- "results": ["50", "100"],
- "stddev": ""},
- {"units": "ms",
- "graphs": ["test", "Sub", "Leaf"],
- "results": ["3", "2", "1"],
- "stddev": ""},
- {"units": "score",
- "graphs": ["test", "DeltaBlue"],
- "results": ["200"],
- "stddev": ""},
- ], self._LoadResults()["traces"])
- self._VerifyErrors([])
- self._VerifyMockMultiple(
- (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
- "file2.js", "run.js"),
- (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
- "file2.js", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-
(path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
-
- def testOneRunStdDevRegExp(self):
- test_input = dict(V8_JSON)
- test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
- self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
- "DeltaBlue: 10657567\nDeltaBlue-stddev:
106\n"])
- self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"},
- ])
- self._VerifyErrors([])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testTwoRunsStdDevRegExp(self):
- test_input = dict(V8_JSON)
- test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
- test_input["run_count"] = 2
- self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
- "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
- "Richards: 2\nRichards-stddev: 0.5\n"
- "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
- self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
- {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
- ])
- self._VerifyErrors(
- ["Benchmark Richards should only run once since a stddev is
provided "
- "by the benchmark.",
- "Benchmark DeltaBlue should only run once since a stddev is
provided "
- "by the benchmark.",
- "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark "
- "DeltaBlue."])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testBuildbot(self):
- self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
- self.assertEquals(0, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
- ])
- self._VerifyErrors([])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
-
- def testBuildbotWithTotal(self):
- test_input = dict(V8_JSON)
- test_input["total"] = True
- self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
- self.assertEquals(0, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
- {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
- ])
- self._VerifyErrors([])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
-
- def testBuildbotWithTotalAndErrors(self):
- test_input = dict(V8_JSON)
- test_input["total"] = True
- self._WriteTestInput(test_input)
- self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue:
10657567\ny\n"])
- self.assertEquals(1, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
- ])
- self._VerifyErrors(
- ["Regexp \"^Richards: (.+)$\" didn't match for benchmark
Richards.",
- "Not all traces have the same number of results."])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
-
- def testRegexpNoMatch(self):
- self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue:
10657567\ny\n"])
- self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
- ])
- self._VerifyErrors(
- ["Regexp \"^Richards: (.+)$\" didn't match for benchmark
Richards."])
-
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
- def testOneRunGeneric(self):
- test_input = dict(V8_GENERIC_JSON)
- self._WriteTestInput(test_input)
- self._MockCommand(["."], [
- "Trace(Test1), Result(1.234), StdDev(0.23)\n"
- "Trace(Test2), Result(10657567), StdDev(106)\n"])
- self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "ms", [
- {"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
- {"name": "Test2", "results": ["10657567"], "stddev": "106"},
- ])
- self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
=======================================
--- /trunk/BUILD.gn Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/BUILD.gn Tue Sep 16 07:50:38 2014 UTC
@@ -1129,6 +1129,11 @@
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
defines = []
deps = [ ":v8_libbase" ]
@@ -1214,6 +1219,11 @@
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
defines = []
if (is_posix) {
@@ -1277,6 +1287,11 @@
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
deps = [
":v8_libbase",
]
=======================================
--- /trunk/ChangeLog Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/ChangeLog Tue Sep 16 07:50:38 2014 UTC
@@ -1,3 +1,13 @@
+2014-09-16: Version 3.29.66
+
+ Currently, a new isolate is created in an uninitialized state, and
+ several API methods will automatically initialize it. During this
+ uninitialized state, code event handlers and function entry
handlers can
+ be attached to the isolate.
+
+ Performance and stability improvements on all platforms.
+
+
2014-09-15: Version 3.29.64
ES6: String(symbol) should work like symbol.toString (issue 3554).
=======================================
--- /trunk/PRESUBMIT.py Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/PRESUBMIT.py Tue Sep 16 07:50:38 2014 UTC
@@ -180,6 +180,6 @@
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
- 'v8_win64_rel': set(['defaulttests']),
+ 'v8_win64_compile_rel': set(['defaulttests']),
},
}
=======================================
--- /trunk/include/v8.h Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/include/v8.h Tue Sep 16 07:50:38 2014 UTC
@@ -4243,6 +4243,103 @@
class RetainedObjectInfo;
+
+/**
+ * FunctionEntryHook is the type of the profile entry hook called at entry
to
+ * any generated function when function-level profiling is enabled.
+ *
+ * \param function the address of the function that's being entered.
+ * \param return_addr_location points to a location on stack where the
machine
+ * return address resides. This can be used to identify the caller of
+ * \p function, and/or modified to divert execution when \p function
exits.
+ *
+ * \note the entry hook must not cause garbage collection.
+ */
+typedef void (*FunctionEntryHook)(uintptr_t function,
+ uintptr_t return_addr_location);
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+ enum EventType {
+ CODE_ADDED,
+ CODE_MOVED,
+ CODE_REMOVED,
+ CODE_ADD_LINE_POS_INFO,
+ CODE_START_LINE_INFO_RECORDING,
+ CODE_END_LINE_INFO_RECORDING
+ };
+ // Definition of the code position type. The "POSITION" type means the
place
+ // in the source code which are of interest when making stack traces to
+ // pin-point the source location of a stack frame as close as possible.
+ // The "STATEMENT_POSITION" means the place at the beginning of each
+ // statement, and is used to indicate possible break locations.
+ enum PositionType { POSITION, STATEMENT_POSITION };
+
+ // Type of event.
+ EventType type;
+ // Start of the instructions.
+ void* code_start;
+ // Size of the instructions.
+ size_t code_len;
+ // Script info for CODE_ADDED event.
+ Handle<UnboundScript> script;
+ // User-defined data for *_LINE_INFO_* event. It's used to hold the
source
+ // code line information which is returned from the
+ // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
+ // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
+ void* user_data;
+
+ struct name_t {
+ // Name of the object associated with the code, note that the string
is not
+ // zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ };
+
+ struct line_info_t {
+ // PC offset
+ size_t offset;
+ // Code position
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ };
+
+ union {
+ // Only valid for CODE_ADDED.
+ struct name_t name;
+
+ // Only valid for CODE_ADD_LINE_POS_INFO
+ struct line_info_t line_info;
+
+ // New location of instructions. Only valid for CODE_MOVED.
+ void* new_code_start;
+ };
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+ kJitCodeEventDefault = 0,
+ // Generate callbacks for already existent code.
+ kJitCodeEventEnumExisting = 1
+};
+
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
+
+
/**
* Isolate represents an isolated instance of the V8 engine. V8
* isolates have completely separate states. Objects from one isolate
@@ -4254,6 +4351,29 @@
*/
class V8_EXPORT Isolate {
public:
+ /**
+ * Initial configuration parameters for a new Isolate.
+ */
+ struct CreateParams {
+ CreateParams() : entry_hook(NULL), code_event_handler(NULL) {}
+
+ /**
+ * The optional entry_hook allows the host application to provide the
+ * address of a function that's invoked on entry to every V8-generated
+ * function. Note that entry_hook is invoked at the very start of each
+ * generated function. Furthermore, if an entry_hook is given, V8 will
+ * always run without a context snapshot.
+ */
+ FunctionEntryHook entry_hook;
+
+ /**
+ * Allows the host application to provide the address of a function
that is
+ * notified each time code is added, moved or removed.
+ */
+ JitCodeEventHandler code_event_handler;
+ };
+
+
/**
* Stack-allocated class which sets the isolate for all operations
* executed within a local scope.
@@ -4362,7 +4482,7 @@
* When an isolate is no longer used its resources should be freed
* by calling Dispose(). Using the delete operator is not allowed.
*/
- static Isolate* New();
+ static Isolate* New(const CreateParams& params = CreateParams());
/**
* Returns the entered isolate for the current thread or NULL in
@@ -4672,6 +4792,31 @@
*/
int ContextDisposedNotification();
+ /**
+ * Allows the host application to provide the address of a function that
is
+ * notified each time code is added, moved or removed.
+ *
+ * \param options options for the JIT code event handler.
+ * \param event_handler the JIT code event handler, which will be invoked
+ * each time code is added, moved or removed.
+ * \note \p event_handler won't get notified of existent code.
+ * \note since code removal notifications are not currently issued, the
+ * \p event_handler may get notifications of code that overlaps
earlier
+ * code notifications. This happens when code areas are reused, and
the
+ * earlier overlapping code areas should therefore be discarded.
+ * \note the events passed to \p event_handler and the strings they
point to
+ * are not guaranteed to live past each call. The \p event_handler
must
+ * copy strings and other parameters it needs to keep around.
+ * \note the set of events declared in JitCodeEvent::EventType is
expected to
+ * grow over time, and the JitCodeEvent structure is expected to
accrue
+ * new members. The \p event_handler function must ignore event codes
+ * it does not recognize to maintain future compatibility.
+ * \note Use Isolate::CreateParams to get events for code executed during
+ * Isolate setup.
+ */
+ void SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler);
+
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
@@ -4750,106 +4895,6 @@
uintptr_t return_addr_location);
-/**
- * FunctionEntryHook is the type of the profile entry hook called at entry
to
- * any generated function when function-level profiling is enabled.
- *
- * \param function the address of the function that's being entered.
- * \param return_addr_location points to a location on stack where the
machine
- * return address resides. This can be used to identify the caller of
- * \p function, and/or modified to divert execution when \p function
exits.
- *
- * \note the entry hook must not cause garbage collection.
- */
-typedef void (*FunctionEntryHook)(uintptr_t function,
- uintptr_t return_addr_location);
-
-
-/**
- * A JIT code event is issued each time code is added, moved or removed.
- *
- * \note removal events are not currently issued.
- */
-struct JitCodeEvent {
- enum EventType {
- CODE_ADDED,
- CODE_MOVED,
- CODE_REMOVED,
- CODE_ADD_LINE_POS_INFO,
- CODE_START_LINE_INFO_RECORDING,
- CODE_END_LINE_INFO_RECORDING
- };
- // Definition of the code position type. The "POSITION" type means the
place
- // in the source code which are of interest when making stack traces to
- // pin-point the source location of a stack frame as close as possible.
- // The "STATEMENT_POSITION" means the place at the beginning of each
- // statement, and is used to indicate possible break locations.
- enum PositionType {
- POSITION,
- STATEMENT_POSITION
- };
-
- // Type of event.
- EventType type;
- // Start of the instructions.
- void* code_start;
- // Size of the instructions.
- size_t code_len;
- // Script info for CODE_ADDED event.
- Handle<UnboundScript> script;
- // User-defined data for *_LINE_INFO_* event. It's used to hold the
source
- // code line information which is returned from the
- // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
- // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
- void* user_data;
-
- struct name_t {
- // Name of the object associated with the code, note that the string
is not
- // zero-terminated.
- const char* str;
- // Number of chars in str.
- size_t len;
- };
-
- struct line_info_t {
- // PC offset
- size_t offset;
- // Code postion
- size_t pos;
- // The position type.
- PositionType position_type;
- };
-
- union {
- // Only valid for CODE_ADDED.
- struct name_t name;
-
- // Only valid for CODE_ADD_LINE_POS_INFO
- struct line_info_t line_info;
-
- // New location of instructions. Only valid for CODE_MOVED.
- void* new_code_start;
- };
-};
-
-/**
- * Option flags passed to the SetJitCodeEventHandler function.
- */
-enum JitCodeEventOptions {
- kJitCodeEventDefault = 0,
- // Generate callbacks for already existent code.
- kJitCodeEventEnumExisting = 1
-};
-
-
-/**
- * Callback function passed to SetJitCodeEventHandler.
- *
- * \param event code add, move or removal event.
- */
-typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
-
-
/**
* Interface for iterating through all external resources in the heap.
*/
@@ -5069,6 +5114,8 @@
* \returns true on success on supported platforms, false on failure.
* \note Setting an entry hook can only be done very early in an isolates
* lifetime, and once set, the entry hook cannot be revoked.
+ *
+ * Deprecated, will be removed. Use Isolate::New(entry_hook) instead.
*/
static bool SetFunctionEntryHook(Isolate* isolate,
FunctionEntryHook entry_hook);
@@ -5092,6 +5139,9 @@
* grow over time, and the JitCodeEvent structure is expected to
accrue
* new members. The \p event_handler function must ignore event codes
* it does not recognize to maintain future compatibility.
+ *
+ * Deprecated, will be removed. Use Isolate::SetJitCodeEventHandler or
+ * Isolate::CreateParams instead.
*/
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
=======================================
--- /trunk/src/api.cc Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/api.cc Tue Sep 16 07:50:38 2014 UTC
@@ -6662,8 +6662,16 @@
}
-Isolate* Isolate::New() {
+Isolate* Isolate::New(const Isolate::CreateParams& params) {
i::Isolate* isolate = new i::Isolate();
+ if (params.entry_hook) {
+ isolate->set_function_entry_hook(params.entry_hook);
+ }
+ if (params.code_event_handler) {
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
+ params.code_event_handler);
+ }
return reinterpret_cast<Isolate*>(isolate);
}
@@ -6870,6 +6878,15 @@
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->NotifyContextDisposed();
}
+
+
+void v8::Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler
event_handler) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ // Ensure that logging is initialized for our isolate.
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(options, event_handler);
+}
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
=======================================
--- /trunk/src/ast.cc Tue Sep 9 00:05:04 2014 UTC
+++ /trunk/src/ast.cc Tue Sep 16 07:50:38 2014 UTC
@@ -997,45 +997,52 @@
entry_id_(id_gen->GetNextId()) {}
-#define REGULAR_NODE(NodeType) \
+#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
+ increase_node_count(); \
}
-#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
+ increase_node_count(); \
+ add_slot_node(node); \
}
-#define DONT_OPTIMIZE_NODE(NodeType) \
+#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_slot_node(node); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
+ }
+#define DONT_TURBOFAN_NODE(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ set_dont_turbofan_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_CACHE_NODE(NodeType) \
+#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
- add_flag(kDontCache); \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
+ add_flag(kDontCache); \
}
REGULAR_NODE(VariableDeclaration)
@@ -1082,15 +1089,17 @@
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
-DONT_OPTIMIZE_NODE(ForOfStatement)
-DONT_OPTIMIZE_NODE(TryCatchStatement)
-DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_OPTIMIZE_NODE(SuperReference)
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
+// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
+DONT_TURBOFAN_NODE(ForOfStatement)
+DONT_TURBOFAN_NODE(TryCatchStatement)
+DONT_TURBOFAN_NODE(TryFinallyStatement)
+
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
@@ -1105,7 +1114,7 @@
add_slot_node(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
- set_dont_optimize_reason(kCallToAJavaScriptRuntimeFunction);
+ set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
}
}
=======================================
--- /trunk/src/ast.h Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/ast.h Tue Sep 16 07:50:38 2014 UTC
@@ -2973,10 +2973,17 @@
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+ AstConstructionVisitor()
+ : dont_crankshaft_reason_(kNoReason),
dont_turbofan_reason_(kNoReason) {}
AstProperties* ast_properties() { return &properties_; }
- BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ BailoutReason dont_optimize_reason() {
+ if (dont_turbofan_reason_ != kNoReason) {
+ return dont_turbofan_reason_;
+ } else {
+ return dont_crankshaft_reason_;
+ }
+ }
private:
template<class> friend class AstNodeFactory;
@@ -2989,8 +2996,11 @@
void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
- void set_dont_optimize_reason(BailoutReason reason) {
- dont_optimize_reason_ = reason;
+ void set_dont_crankshaft_reason(BailoutReason reason) {
+ dont_crankshaft_reason_ = reason;
+ }
+ void set_dont_turbofan_reason(BailoutReason reason) {
+ dont_turbofan_reason_ = reason;
}
void add_slot_node(FeedbackSlotInterface* slot_node) {
@@ -3002,7 +3012,8 @@
}
AstProperties properties_;
- BailoutReason dont_optimize_reason_;
+ BailoutReason dont_crankshaft_reason_;
+ BailoutReason dont_turbofan_reason_;
};
=======================================
--- /trunk/src/base/build_config.h Fri Sep 12 00:05:16 2014 UTC
+++ /trunk/src/base/build_config.h Tue Sep 16 07:50:38 2014 UTC
@@ -21,7 +21,6 @@
// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#else
#define V8_HOST_ARCH_X64 1
#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32.
@@ -29,16 +28,13 @@
#else
#define V8_HOST_ARCH_64_BIT 1
#endif
-#define V8_HOST_CAN_READ_UNALIGNED 1
#endif // __native_client__
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__AARCH64EL__)
#define V8_HOST_ARCH_ARM64 1
#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Mon
Sep 15 00:05:18 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64-unittest.cc Tue
Sep 16 07:50:38 2014 UTC
@@ -971,6 +971,133 @@
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+
+
+//
-----------------------------------------------------------------------------
+// Comparison instructions.
+
+static const MachInst2 kComparisonInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
kMachInt32},
+ {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp,
kMachInt64},
+};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorComparisonTest;
+
+
+TEST_P(InstructionSelectorComparisonTest, WithParameters) {
+ const MachInst2 cmp = GetParam();
+ const MachineType type = cmp.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
+ const MachInst2 cmp = GetParam();
+ const MachineType type = cmp.machine_type;
+ // TODO(all): Add support for testing 64-bit immediates.
+ if (type == kMachInt32) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // Comparisons with 0 are turned into a tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // Comparisons with 0 are turned into a tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+ m.Return((m.*cmp.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorComparisonTest,
+ ::testing::ValuesIn(kComparisonInstructions));
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
} // namespace compiler
} // namespace internal
=======================================
--- /trunk/src/compiler/ast-graph-builder.cc Fri Sep 12 00:05:16 2014 UTC
+++ /trunk/src/compiler/ast-graph-builder.cc Tue Sep 16 07:50:38 2014 UTC
@@ -956,7 +956,8 @@
Node* attr = jsgraph()->Constant(NONE);
const Operator* op =
javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked,
5);
- NewNode(op, literal, name, getter, setter, attr);
+ Node* call = NewNode(op, literal, name, getter, setter, attr);
+ PrepareFrameState(call, it->first->id());
}
// Transform literals that contain functions to fast properties.
@@ -1421,8 +1422,10 @@
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
+ environment()->Push(value);
BuildVariableAssignment(variable, value, expr->op(),
expr->AssignmentId());
+ environment()->Pop();
break;
}
case NAMED_PROPERTY: {
@@ -1431,7 +1434,9 @@
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
Node* store =
NewNode(javascript()->StoreNamed(strict_mode(), name), object,
value);
+ environment()->Push(value);
PrepareFrameState(store, expr->AssignmentId());
+ environment()->Pop();
break;
}
case KEYED_PROPERTY: {
@@ -1439,7 +1444,9 @@
Node* object = environment()->Pop();
Node* store = NewNode(javascript()->StoreProperty(strict_mode()),
object,
key, value);
+ environment()->Push(value);
PrepareFrameState(store, expr->AssignmentId());
+ environment()->Pop();
break;
}
}
=======================================
--- /trunk/src/compiler/code-generator.cc Thu Sep 4 10:39:50 2014 UTC
+++ /trunk/src/compiler/code-generator.cc Tue Sep 16 07:50:38 2014 UTC
@@ -19,7 +19,6 @@
masm_(code->zone()->isolate(), NULL, 0),
resolver_(this),
safepoints_(code->zone()),
- deoptimization_points_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
translations_(code->zone()) {}
@@ -46,10 +45,16 @@
i != code()->end(); ++i) {
AssembleInstruction(*i);
}
-
- EmitLazyDeoptimizationCallTable();
FinishCode(masm());
+
+ // Ensure there is space for lazy deopt.
+ if (!info->IsStub()) {
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
@@ -74,10 +79,9 @@
}
-Safepoint::Id CodeGenerator::RecordSafepoint(PointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode
deopt_mode) {
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind
kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
const ZoneList<InstructionOperand*>* operands =
pointers->GetNormalizedOperands();
Safepoint safepoint =
@@ -91,7 +95,6 @@
safepoint.DefinePointerRegister(reg, zone());
}
}
- return safepoint.id();
}
@@ -170,19 +173,6 @@
if (move != NULL) resolver()->Resolve(move);
}
}
-
-
-void CodeGenerator::EmitLazyDeoptimizationCallTable() {
- // ZoneDeque<DeoptimizationPoint*>::iterator iter;
- int i = 0;
- for (ZoneDeque<DeoptimizationPoint*>::iterator
- iter = deoptimization_points_.begin();
- iter != deoptimization_points_.end(); iter++, i++) {
- int pc_offset = masm()->pc_offset();
- AssembleDeoptimizerCall((*iter)->lazy_state_id());
- safepoints()->SetDeoptimizationPc((*iter)->safepoint(), pc_offset);
- }
-}
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
@@ -231,7 +221,7 @@
data->SetTranslationIndex(
i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
data->SetArgumentsStackHeight(i, Smi::FromInt(0));
- data->SetPc(i, Smi::FromInt(-1));
+ data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
code_object->set_deoptimization_data(*data);
@@ -243,10 +233,14 @@
bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
- Safepoint::Id safepoint_id = RecordSafepoint(
+ RecordSafepoint(
instr->pointer_map(), Safepoint::kSimple, 0,
needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+ if (flags & CallDescriptor::kNeedsNopAfterCall) {
+ AddNopForSmiCodeInlining();
+ }
+
if (needs_frame_state) {
// If the frame state is present, it starts at argument 1
// (just after the code address).
@@ -255,15 +249,19 @@
int frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
- int deopt_state_id =
- BuildTranslation(instr, frame_state_offset, kIgnoreOutput);
- int lazy_deopt_state_id = deopt_state_id;
+ int pc_offset = masm()->pc_offset();
+ int deopt_state_id = BuildTranslation(instr, pc_offset,
frame_state_offset,
+ descriptor->state_combine());
+ // If the pre-call frame state differs from the post-call one, produce
the
+ // pre-call frame state, too.
+ // TODO(jarin) We might want to avoid building the pre-call frame state
+ // because it is only used to get locals and arguments (by the
debugger and
+ // f.arguments), and those are the same in the pre-call and post-call
+ // states.
if (descriptor->state_combine() != kIgnoreOutput) {
- lazy_deopt_state_id = BuildTranslation(instr, frame_state_offset,
- descriptor->state_combine());
+ deopt_state_id =
+ BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
}
- deoptimization_points_.push_back(new (zone()) DeoptimizationPoint(
- deopt_state_id, lazy_deopt_state_id, descriptor, safepoint_id));
#if DEBUG
// Make sure all the values live in stack slots or they are immediates.
// (The values should not live in register because registers are
clobbered
@@ -273,11 +271,7 @@
CHECK(op->IsStackSlot() || op->IsImmediate());
}
#endif
- safepoints()->RecordLazyDeoptimizationIndex(lazy_deopt_state_id);
- }
-
- if (flags & CallDescriptor::kNeedsNopAfterCall) {
- AddNopForSmiCodeInlining();
+ safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
}
}
@@ -340,7 +334,8 @@
}
-int CodeGenerator::BuildTranslation(Instruction* instr, int
frame_state_offset,
+int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
+ int frame_state_offset,
OutputFrameStateCombine state_combine)
{
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
@@ -354,7 +349,7 @@
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
deoptimization_states_.push_back(new (zone()) DeoptimizationState(
- descriptor->bailout_id(), translation.index()));
+ descriptor->bailout_id(), translation.index(), pc_offset));
return deoptimization_id;
}
=======================================
--- /trunk/src/compiler/code-generator.h Thu Sep 4 10:39:50 2014 UTC
+++ /trunk/src/compiler/code-generator.h Tue Sep 16 07:50:38 2014 UTC
@@ -46,8 +46,8 @@
}
// Record a safepoint with the given pointer map.
- Safepoint::Id RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode
deopt_mode);
+ void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode);
// Assemble code for the specified instruction.
void AssembleInstruction(Instruction* instr);
@@ -84,12 +84,12 @@
//
===========================================================================
// Deoptimization table construction
void AddSafepointAndDeopt(Instruction* instr);
- void EmitLazyDeoptimizationCallTable();
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
int frame_state_offset);
- int BuildTranslation(Instruction* instr, int frame_state_offset,
+ int BuildTranslation(Instruction* instr, int pc_offset,
+ int frame_state_offset,
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, Instruction* instr,
@@ -100,39 +100,21 @@
void AddNopForSmiCodeInlining();
//
===========================================================================
- class DeoptimizationPoint : public ZoneObject {
- public:
- int state_id() const { return state_id_; }
- int lazy_state_id() const { return lazy_state_id_; }
- FrameStateDescriptor* descriptor() const { return descriptor_; }
- Safepoint::Id safepoint() const { return safepoint_; }
-
- DeoptimizationPoint(int state_id, int lazy_state_id,
- FrameStateDescriptor* descriptor,
- Safepoint::Id safepoint)
- : state_id_(state_id),
- lazy_state_id_(lazy_state_id),
- descriptor_(descriptor),
- safepoint_(safepoint) {}
-
- private:
- int state_id_;
- int lazy_state_id_;
- FrameStateDescriptor* descriptor_;
- Safepoint::Id safepoint_;
- };
-
struct DeoptimizationState : ZoneObject {
public:
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
+ int pc_offset() const { return pc_offset_; }
- DeoptimizationState(BailoutId bailout_id, int translation_id)
- : bailout_id_(bailout_id), translation_id_(translation_id) {}
+ DeoptimizationState(BailoutId bailout_id, int translation_id, int
pc_offset)
+ : bailout_id_(bailout_id),
+ translation_id_(translation_id),
+ pc_offset_(pc_offset) {}
private:
BailoutId bailout_id_;
int translation_id_;
+ int pc_offset_;
};
InstructionSequence* code_;
@@ -141,7 +123,6 @@
MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
- ZoneDeque<DeoptimizationPoint*> deoptimization_points_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
=======================================
--- /trunk/src/compiler/linkage.cc Tue Sep 9 00:05:04 2014 UTC
+++ /trunk/src/compiler/linkage.cc Tue Sep 16 07:50:38 2014 UTC
@@ -117,9 +117,11 @@
// few chosen runtime functions.
switch (function) {
case Runtime::kDebugBreak:
+ case Runtime::kDebugGetLoadedScripts:
case Runtime::kDeoptimizeFunction:
+ case Runtime::kInlineCallFunction:
+ case Runtime::kPrepareStep:
case Runtime::kSetScriptBreakPoint:
- case Runtime::kDebugGetLoadedScripts:
case Runtime::kStackGuard:
return true;
default:
=======================================
--- /trunk/src/compiler/scheduler.cc Tue Sep 2 12:59:15 2014 UTC
+++ /trunk/src/compiler/scheduler.cc Tue Sep 16 07:50:38 2014 UTC
@@ -638,13 +638,19 @@
int max = static_cast<int>(schedule_->rpo_order()->size());
for (int i = max - 1; i >= 0; i--) {
BasicBlock* block = schedule_->rpo_order()->at(i);
+ // TODO(titzer): we place at most one floating control structure per
+ // basic block because scheduling currently can interleave phis from
+ // one subgraph with the merges from another subgraph.
+ bool one_placed = false;
for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
Node* node = block->nodes_[j];
SchedulerData* data = GetData(node);
- if (data->is_floating_control_ && !data->is_connected_control_) {
+ if (data->is_floating_control_ && !data->is_connected_control_ &&
+ !one_placed) {
Trace(" Floating control #%d:%s was scheduled in B%d\n",
node->id(),
node->op()->mnemonic(), block->id());
ConnectFloatingControlSubgraph(block, node);
+ one_placed = true;
}
}
}
=======================================
--- /trunk/src/compiler/simplified-operator-reducer-unittest.cc Mon Sep 15
00:05:18 2014 UTC
+++ /trunk/src/compiler/simplified-operator-reducer-unittest.cc Tue Sep 16
07:50:38 2014 UTC
@@ -129,7 +129,7 @@
namespace {
struct UnaryOperator {
- const Operator* (SimplifiedOperatorBuilder::*constructor)() const;
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
const char* constructor_name;
};
=======================================
--- /trunk/src/compiler/simplified-operator-unittest.cc Mon Sep 15 00:05:18
2014 UTC
+++ /trunk/src/compiler/simplified-operator-unittest.cc Tue Sep 16 07:50:38
2014 UTC
@@ -18,7 +18,7 @@
namespace {
struct PureOperator {
- const Operator* (SimplifiedOperatorBuilder::*constructor)() const;
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
IrOpcode::Value opcode;
Operator::Properties properties;
int value_input_count;
=======================================
--- /trunk/src/compiler/simplified-operator.cc Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/compiler/simplified-operator.cc Tue Sep 16 07:50:38 2014 UTC
@@ -117,15 +117,13 @@
: impl_(kImpl.Get()), zone_(zone) {}
-#define PURE(Name, properties, input_count) \
- const Operator* SimplifiedOperatorBuilder::Name() const { \
- return &impl_.k##Name; \
- }
+#define PURE(Name, properties, input_count) \
+ const Operator* SimplifiedOperatorBuilder::Name() { return
&impl_.k##Name; }
PURE_OP_LIST(PURE)
#undef PURE
-const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type)
const {
+const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
// TODO(titzer): What about the type parameter?
return new (zone()) SimpleOperator(IrOpcode::kReferenceEqual,
Operator::kCommutative |
Operator::kPure,
@@ -133,11 +131,11 @@
}
-#define ACCESS(Name, Type, properties, input_count,
output_count) \
- const Operator* SimplifiedOperatorBuilder::Name(const Type& access)
const { \
- return new
(zone()) \
- Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow |
properties, \
- input_count, output_count, #Name,
access); \
+#define ACCESS(Name, Type, properties, input_count,
output_count) \
+ const Operator* SimplifiedOperatorBuilder::Name(const Type& access)
{ \
+ return new
(zone()) \
+ Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow |
properties, \
+ input_count, output_count, #Name,
access); \
}
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
=======================================
--- /trunk/src/compiler/simplified-operator.h Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/compiler/simplified-operator.h Tue Sep 16 07:50:38 2014 UTC
@@ -90,39 +90,39 @@
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
- const Operator* BooleanNot() const WARN_UNUSED_RESULT;
+ const Operator* BooleanNot();
- const Operator* NumberEqual() const WARN_UNUSED_RESULT;
- const Operator* NumberLessThan() const WARN_UNUSED_RESULT;
- const Operator* NumberLessThanOrEqual() const WARN_UNUSED_RESULT;
- const Operator* NumberAdd() const WARN_UNUSED_RESULT;
- const Operator* NumberSubtract() const WARN_UNUSED_RESULT;
- const Operator* NumberMultiply() const WARN_UNUSED_RESULT;
- const Operator* NumberDivide() const WARN_UNUSED_RESULT;
- const Operator* NumberModulus() const WARN_UNUSED_RESULT;
- const Operator* NumberToInt32() const WARN_UNUSED_RESULT;
- const Operator* NumberToUint32() const WARN_UNUSED_RESULT;
+ const Operator* NumberEqual();
+ const Operator* NumberLessThan();
+ const Operator* NumberLessThanOrEqual();
+ const Operator* NumberAdd();
+ const Operator* NumberSubtract();
+ const Operator* NumberMultiply();
+ const Operator* NumberDivide();
+ const Operator* NumberModulus();
+ const Operator* NumberToInt32();
+ const Operator* NumberToUint32();
- const Operator* ReferenceEqual(Type* type) const WARN_UNUSED_RESULT;
+ const Operator* ReferenceEqual(Type* type);
- const Operator* StringEqual() const WARN_UNUSED_RESULT;
- const Operator* StringLessThan() const WARN_UNUSED_RESULT;
- const Operator* StringLessThanOrEqual() const WARN_UNUSED_RESULT;
- const Operator* StringAdd() const WARN_UNUSED_RESULT;
+ const Operator* StringEqual();
+ const Operator* StringLessThan();
+ const Operator* StringLessThanOrEqual();
+ const Operator* StringAdd();
- const Operator* ChangeTaggedToInt32() const WARN_UNUSED_RESULT;
- const Operator* ChangeTaggedToUint32() const WARN_UNUSED_RESULT;
- const Operator* ChangeTaggedToFloat64() const WARN_UNUSED_RESULT;
- const Operator* ChangeInt32ToTagged() const WARN_UNUSED_RESULT;
- const Operator* ChangeUint32ToTagged() const WARN_UNUSED_RESULT;
- const Operator* ChangeFloat64ToTagged() const WARN_UNUSED_RESULT;
- const Operator* ChangeBoolToBit() const WARN_UNUSED_RESULT;
- const Operator* ChangeBitToBool() const WARN_UNUSED_RESULT;
+ const Operator* ChangeTaggedToInt32();
+ const Operator* ChangeTaggedToUint32();
+ const Operator* ChangeTaggedToFloat64();
+ const Operator* ChangeInt32ToTagged();
+ const Operator* ChangeUint32ToTagged();
+ const Operator* ChangeFloat64ToTagged();
+ const Operator* ChangeBoolToBit();
+ const Operator* ChangeBitToBool();
- const Operator* LoadField(const FieldAccess&) const WARN_UNUSED_RESULT;
- const Operator* StoreField(const FieldAccess&) const WARN_UNUSED_RESULT;
- const Operator* LoadElement(const ElementAccess&) const
WARN_UNUSED_RESULT;
- const Operator* StoreElement(const ElementAccess&) const
WARN_UNUSED_RESULT;
+ const Operator* LoadField(const FieldAccess&);
+ const Operator* StoreField(const FieldAccess&);
+ const Operator* LoadElement(const ElementAccess&);
+ const Operator* StoreElement(const ElementAccess&);
private:
Zone* zone() const { return zone_; }
=======================================
--- /trunk/src/compiler/typer.cc Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/compiler/typer.cc Tue Sep 16 07:50:38 2014 UTC
@@ -119,19 +119,27 @@
public:
RunVisitor(Typer* typer, MaybeHandle<Context> context)
: Visitor(typer, context),
- phis(NodeSet::key_compare(),
NodeSet::allocator_type(typer->zone())) {}
+ redo(NodeSet::key_compare(),
NodeSet::allocator_type(typer->zone())) {}
GenericGraphVisit::Control Post(Node* node) {
if (OperatorProperties::HasValueOutput(node->op())) {
Bounds bounds = TypeNode(node);
NodeProperties::SetBounds(node, bounds);
- // Remember phis for least fixpoint iteration.
- if (node->opcode() == IrOpcode::kPhi) phis.insert(node);
+ // Remember incompletely typed nodes for least fixpoint iteration.
+ int arity = OperatorProperties::GetValueInputCount(node->op());
+ for (int i = 0; i < arity; ++i) {
+ // TODO(rossberg): change once IsTyped is available.
+ // if
(!NodeProperties::IsTyped(NodeProperties::GetValueInput(node, i)))
+ if (OperandType(node, i).upper->Is(Type::None())) {
+ redo.insert(node);
+ break;
+ }
+ }
}
return GenericGraphVisit::CONTINUE;
}
- NodeSet phis;
+ NodeSet redo;
};
@@ -190,7 +198,7 @@
RunVisitor typing(this, context);
graph->VisitNodeInputsFromEnd(&typing);
// Find least fixpoint.
- for (NodeSetIter i = typing.phis.begin(); i != typing.phis.end(); ++i) {
+ for (NodeSetIter i = typing.redo.begin(); i != typing.redo.end(); ++i) {
Widen(graph, *i, context);
}
}
=======================================
--- /trunk/src/d8.cc Tue Sep 2 12:59:15 2014 UTC
+++ /trunk/src/d8.cc Tue Sep 16 07:50:38 2014 UTC
@@ -1612,7 +1612,16 @@
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
}
int result = 0;
- Isolate* isolate = Isolate::New();
+ Isolate::CreateParams create_params;
+#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
+ if (i::FLAG_gdbjit) {
+ create_params.code_event_handler = i::GDBJITInterface::EventHandler;
+ }
+#endif
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+ vTune::InitializeVtuneForV8(create_params);
+#endif
+ Isolate* isolate = Isolate::New(create_params);
#ifndef V8_SHARED
v8::ResourceConstraints constraints;
constraints.ConfigureDefaults(base::SysInfo::AmountOfPhysicalMemory(),
@@ -1624,15 +1633,6 @@
{
Isolate::Scope scope(isolate);
Initialize(isolate);
-#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
- if (i::FLAG_gdbjit) {
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
- i::GDBJITInterface::EventHandler);
- }
-#endif
-#ifdef ENABLE_VTUNE_JIT_INTERFACE
- vTune::InitializeVtuneForV8();
-#endif
PerIsolateData data(isolate);
InitializeDebugger(isolate);
=======================================
--- /trunk/src/deoptimizer.cc Tue Sep 9 00:05:04 2014 UTC
+++ /trunk/src/deoptimizer.cc Tue Sep 16 07:50:38 2014 UTC
@@ -353,7 +353,7 @@
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool turbofanned = code->is_turbofanned();
+ bool turbofanned = code->is_turbofanned()
&& !FLAG_turbo_deoptimization;
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
CHECK(topmost_optimized_code == NULL || safe_to_deopt ||
turbofanned);
@@ -400,10 +400,6 @@
}
element = next;
}
-
- if (FLAG_turbo_deoptimization) {
- PatchStackForMarkedCode(isolate);
- }
// TODO(titzer): we need a handle scope only because of the macro
assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
@@ -426,11 +422,7 @@
shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
// Do platform-specific patching to force any activations to lazy
deopt.
- //
- // We skip patching Turbofan code - we patch return addresses on stack.
- // TODO(jarin) We should still zap the code object (but we have to
- // be careful not to zap the deoptimization block).
- if (!codes[i]->is_turbofanned()) {
+ if (!codes[i]->is_turbofanned() || FLAG_turbo_deoptimization) {
PatchCodeForDeoptimization(isolate, codes[i]);
// We might be in the middle of incremental marking with compaction.
@@ -440,40 +432,6 @@
}
}
}
-
-
-// For all marked Turbofanned code on stack, change the return address to
go
-// to the deoptimization block.
-void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
- // TODO(jarin) We should tolerate missing patch entry for the topmost
frame.
- for (StackFrameIterator it(isolate,
isolate->thread_local_top()); !it.done();
- it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (code->is_turbofanned() && code->marked_for_deoptimization()) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- Address* pc_address = it.frame()->pc_address();
- int pc_offset =
- static_cast<int>(*pc_address - code->instruction_start());
- SafepointEntry safepoint_entry =
code->GetSafepointEntry(*pc_address);
- unsigned new_pc_offset = safepoint_entry.deoptimization_pc();
-
- if (FLAG_trace_deopt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[patching stack address for function: ");
- function->PrintName(scope.file());
- PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
- new_pc_offset);
- }
-
- CHECK(new_pc_offset != Safepoint::kNoDeoptimizationPc);
- *pc_address += static_cast<int>(new_pc_offset) - pc_offset;
- }
- }
- }
-}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
=======================================
--- /trunk/src/deoptimizer.h Tue Sep 9 00:05:04 2014 UTC
+++ /trunk/src/deoptimizer.h Tue Sep 16 07:50:38 2014 UTC
@@ -17,19 +17,9 @@
static inline double read_double_value(Address p) {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return Memory::double_at(p);
-#else // V8_HOST_CAN_READ_UNALIGNED
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned address.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(p);
- c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
- return c.d;
-#endif // V8_HOST_CAN_READ_UNALIGNED
+ double d;
+ memcpy(&d, p, sizeof(d));
+ return d;
}
@@ -177,8 +167,6 @@
// refer to that code.
static void DeoptimizeMarkedCode(Isolate* isolate);
- static void PatchStackForMarkedCode(Isolate* isolate);
-
// Visit all the known optimized functions in a given isolate.
static void VisitAllOptimizedFunctions(
Isolate* isolate, OptimizedFunctionVisitor* visitor);
=======================================
--- /trunk/src/heap/gc-idle-time-handler-unittest.cc Tue Sep 2 12:59:15
2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler-unittest.cc Tue Sep 16 07:50:38
2014 UTC
@@ -32,8 +32,8 @@
}
static const size_t kSizeOfObjects = 100 * MB;
- static const size_t kMarkCompactSpeed = 100 * KB;
- static const size_t kMarkingSpeed = 100 * KB;
+ static const size_t kMarkCompactSpeed = 200 * KB;
+ static const size_t kMarkingSpeed = 200 * KB;
private:
GCIdleTimeHandler handler_;
@@ -87,7 +87,7 @@
TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
size_t size = 100 * MB;
- size_t speed = 10 * KB;
+ size_t speed = 1 * MB;
size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
EXPECT_EQ(size / speed, time);
}
=======================================
--- /trunk/src/heap/gc-idle-time-handler.cc Fri Sep 12 00:05:16 2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler.cc Tue Sep 16 07:50:38 2014 UTC
@@ -10,7 +10,7 @@
namespace internal {
const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000000;
+const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
@@ -78,9 +78,10 @@
}
}
if (heap_state.incremental_marking_stopped) {
- if (idle_time_in_ms >= EstimateMarkCompactTime(
- heap_state.size_of_objects,
-
heap_state.mark_compact_speed_in_bytes_per_ms) ||
+ size_t estimated_time_in_ms =
+ EstimateMarkCompactTime(heap_state.size_of_objects,
+
heap_state.mark_compact_speed_in_bytes_per_ms);
+ if (idle_time_in_ms >= estimated_time_in_ms ||
(heap_state.size_of_objects < kSmallHeapSize &&
heap_state.contexts_disposed > 0)) {
// If there are no more than two GCs left in this idle round and we
are
=======================================
--- /trunk/src/heap/heap.cc Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/heap/heap.cc Tue Sep 16 07:50:38 2014 UTC
@@ -4316,7 +4316,6 @@
GCIdleTimeAction action =
gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
- contexts_disposed_ = 0;
bool result = false;
switch (action.type) {
case DO_INCREMENTAL_MARKING:
@@ -4352,6 +4351,7 @@
PrintF("]\n");
}
+ contexts_disposed_ = 0;
return result;
}
=======================================
--- /trunk/src/objects.cc Fri Sep 12 00:05:16 2014 UTC
+++ /trunk/src/objects.cc Tue Sep 16 07:50:38 2014 UTC
@@ -8507,36 +8507,7 @@
static inline bool CompareRawStringContents(const Char* const a,
const Char* const b,
int length) {
- int i = 0;
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- // If this architecture isn't comfortable reading unaligned ints
- // then we have to check that the strings are aligned before
- // comparing them blockwise.
- const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a);
- uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b);
- if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
-#endif
- const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
- int endpoint = length - kStepSize;
- // Compare blocks until we reach near the end of the string.
- for (; i <= endpoint; i += kStepSize) {
- uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
- uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
- if (wa != wb) {
- return false;
- }
- }
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- }
-#endif
- // Compare the remaining characters that didn't fit into a block.
- for (; i < length; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
+ return CompareChars(a, b, length) == 0;
}
@@ -10937,13 +10908,6 @@
} else {
os << "<none>";
}
- if (entry.deoptimization_pc() != Safepoint::kNoDeoptimizationPc) {
- Vector<char> buf2 = Vector<char>::New(30);
- SNPrintF(buf2, "%6d", entry.deoptimization_pc());
- os << buf2.start();
- } else {
- os << "<none>";
- }
if (entry.argument_count() > 0) {
os << " argc: " << entry.argument_count();
}
=======================================
--- /trunk/src/objects.h Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/objects.h Tue Sep 16 07:50:38 2014 UTC
@@ -9172,22 +9172,33 @@
static inline int NonAsciiStart(const char* chars, int length) {
const char* start = chars;
const char* limit = chars + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
- const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars + sizeof(uintptr_t) <= limit) {
- if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
- return static_cast<int>(chars - start);
+
+ if (length >= kIntptrSize) {
+ // Check unaligned bytes.
+ while (!IsAligned(reinterpret_cast<intptr_t>(chars),
sizeof(uintptr_t))) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar)
{
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
}
- chars += sizeof(uintptr_t);
+ // Check aligned words.
+ DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ while (chars + sizeof(uintptr_t) <= limit) {
+ if (*reinterpret_cast<const uintptr_t*>(chars) &
non_one_byte_mask) {
+ return static_cast<int>(chars - start);
+ }
+ chars += sizeof(uintptr_t);
+ }
}
-#endif
+ // Check remaining unaligned bytes.
while (chars < limit) {
if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
return static_cast<int>(chars - start);
}
++chars;
}
+
return static_cast<int>(chars - start);
}
=======================================
--- /trunk/src/regexp-macro-assembler-irregexp.h Thu Jun 5 00:04:53 2014
UTC
+++ /trunk/src/regexp-macro-assembler-irregexp.h Tue Sep 16 07:50:38 2014
UTC
@@ -31,6 +31,7 @@
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
+ virtual bool CanReadUnaligned() { return false; }
virtual void Bind(Label* label);
virtual void AdvanceCurrentPosition(int by); // Signed cp change.
virtual void PopCurrentPosition();
=======================================
--- /trunk/src/regexp-macro-assembler.cc Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/regexp-macro-assembler.cc Tue Sep 16 07:50:38 2014 UTC
@@ -22,15 +22,6 @@
RegExpMacroAssembler::~RegExpMacroAssembler() {
}
-
-
-bool RegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return true;
-#else
- return false;
-#endif
-}
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
=======================================
--- /trunk/src/regexp-macro-assembler.h Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/src/regexp-macro-assembler.h Tue Sep 16 07:50:38 2014 UTC
@@ -48,7 +48,7 @@
// kCheckStackLimit flag to push operations (instead of
kNoStackLimitCheck)
// at least once for every stack_limit() pushes that are executed.
virtual int stack_limit_slack() = 0;
- virtual bool CanReadUnaligned();
+ virtual bool CanReadUnaligned() = 0;
virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change.
virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by.
// Continues execution from the position pushed on the top of the
backtrack
=======================================
--- /trunk/src/runtime.cc Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/runtime.cc Tue Sep 16 07:50:38 2014 UTC
@@ -6552,34 +6552,38 @@
bool changed = false;
uintptr_t or_acc = 0;
const char* const limit = src + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- // Process the prefix of the input that requires no conversion one
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
+
+ // dst is newly allocated and always aligned.
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+ // Only attempt processing one word at a time if src is also aligned.
+ if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+ // Process the prefix of the input that requires no conversion one
aligned
+ // (machine) word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ or_acc |= w;
+ if (AsciiRangeMask(w, lo, hi) != 0) {
+ changed = true;
+ break;
+ }
+ *reinterpret_cast<uintptr_t*>(dst) = w;
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
}
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
+ // Process the remainder of the input performing conversion when
+ // required one word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ or_acc |= w;
+ uintptr_t m = AsciiRangeMask(w, lo, hi);
+ // The mask has high (7th) bit set in every byte that needs
+ // conversion and we know that the distance between cases is
+ // 1 << 5.
+ *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
}
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
-#endif
// Process the last few bytes of the input (or the whole input if
// unaligned access is not supported).
while (src < limit) {
@@ -6593,9 +6597,8 @@
++src;
++dst;
}
- if ((or_acc & kAsciiMask) != 0) {
- return false;
- }
+
+ if ((or_acc & kAsciiMask) != 0) return false;
DCHECK(CheckFastAsciiConvert(
saved_dst, saved_src, length, changed,
Converter::kIsToLower));
=======================================
--- /trunk/src/safepoint-table.cc Mon Aug 25 19:57:56 2014 UTC
+++ /trunk/src/safepoint-table.cc Tue Sep 16 07:50:38 2014 UTC
@@ -43,8 +43,8 @@
length_ = Memory::uint32_at(header + kLengthOffset);
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
- entries_ =
- pc_and_deoptimization_indexes_ + (length_ *
kPcAndDeoptimizationInfoSize);
+ entries_ = pc_and_deoptimization_indexes_ +
+ (length_ * kPcAndDeoptimizationIndexSize);
DCHECK(entry_size_ > 0);
STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
Safepoint::kNoDeoptimizationIndex);
@@ -56,7 +56,6 @@
for (unsigned i = 0; i < length(); i++) {
// TODO(kasperl): Replace the linear search with binary search.
if (GetPcOffset(i) == pc_offset) return GetEntry(i);
- if (GetDeoptimizationPcOffset(i) == pc_offset) return GetEntry(i);
}
return SafepointEntry();
}
@@ -111,8 +110,6 @@
info.pc = assembler->pc_offset();
info.arguments = arguments;
info.has_doubles = (kind & Safepoint::kWithDoubles);
- info.deoptimization_pc = Safepoint::kNoDeoptimizationPc;
- int safepoint_id = deoptimization_info_.length();
deoptimization_info_.Add(info, zone_);
deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
@@ -123,7 +120,7 @@
? new(zone_) ZoneList<int>(4, zone_)
: NULL,
zone_);
- return Safepoint(safepoint_id, indexes_.last(), registers_.last());
+ return Safepoint(indexes_.last(), registers_.last());
}
@@ -162,7 +159,6 @@
assembler->dd(deoptimization_info_[i].pc);
assembler->dd(EncodeExceptPC(deoptimization_info_[i],
deopt_index_list_[i]));
- assembler->dd(deoptimization_info_[i].deoptimization_pc);
}
// Emit table of bitmaps.
=======================================
--- /trunk/src/safepoint-table.h Mon Aug 25 19:57:56 2014 UTC
+++ /trunk/src/safepoint-table.h Tue Sep 16 07:50:38 2014 UTC
@@ -17,10 +17,9 @@
class SafepointEntry BASE_EMBEDDED {
public:
- SafepointEntry() : info_(0), deoptimization_pc_(0), bits_(NULL) {}
+ SafepointEntry() : info_(0), bits_(NULL) {}
- SafepointEntry(unsigned info, unsigned deoptimization_pc, uint8_t* bits)
- : info_(info), deoptimization_pc_(deoptimization_pc), bits_(bits) {
+ SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
DCHECK(is_valid());
}
@@ -39,11 +38,6 @@
DCHECK(is_valid());
return DeoptimizationIndexField::decode(info_);
}
-
- unsigned deoptimization_pc() const {
- DCHECK(is_valid());
- return deoptimization_pc_;
- }
static const int kArgumentsFieldBits = 3;
static const int kSaveDoublesFieldBits = 1;
@@ -80,7 +74,6 @@
private:
unsigned info_;
- unsigned deoptimization_pc_;
uint8_t* bits_;
};
@@ -91,7 +84,7 @@
int size() const {
return kHeaderSize +
- (length_ * (kPcAndDeoptimizationInfoSize + entry_size_));
+ (length_ * (kPcAndDeoptimizationIndexSize + entry_size_));
}
unsigned length() const { return length_; }
unsigned entry_size() const { return entry_size_; }
@@ -100,18 +93,12 @@
DCHECK(index < length_);
return Memory::uint32_at(GetPcOffsetLocation(index));
}
-
- unsigned GetDeoptimizationPcOffset(unsigned index) const {
- DCHECK(index < length_);
- return Memory::uint32_at(GetDeoptimizationPcLocation(index));
- }
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
unsigned info = Memory::uint32_at(GetInfoLocation(index));
- unsigned deopt_pc =
Memory::uint32_at(GetDeoptimizationPcLocation(index));
uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
- return SafepointEntry(info, deopt_pc, bits);
+ return SafepointEntry(info, bits);
}
// Returns the entry for the given pc.
@@ -128,22 +115,17 @@
static const int kPcSize = kIntSize;
static const int kDeoptimizationIndexSize = kIntSize;
- static const int kDeoptimizationPcSize = kIntSize;
- static const int kPcAndDeoptimizationInfoSize =
- kPcSize + kDeoptimizationIndexSize + kDeoptimizationPcSize;
+ static const int kPcAndDeoptimizationIndexSize =
+ kPcSize + kDeoptimizationIndexSize;
Address GetPcOffsetLocation(unsigned index) const {
return pc_and_deoptimization_indexes_ +
- (index * kPcAndDeoptimizationInfoSize);
+ (index * kPcAndDeoptimizationIndexSize);
}
Address GetInfoLocation(unsigned index) const {
return GetPcOffsetLocation(index) + kPcSize;
}
-
- Address GetDeoptimizationPcLocation(unsigned index) const {
- return GetInfoLocation(index) + kDeoptimizationIndexSize;
- }
static void PrintBits(OStream& os, // NOLINT
uint8_t byte, int digits);
@@ -177,30 +159,15 @@
kLazyDeopt
};
- class Id {
- private:
- explicit Id(int id) : id_(id) {}
-
- int id_;
-
- friend class SafepointTableBuilder;
- friend class Safepoint;
- };
-
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
- static const unsigned kNoDeoptimizationPc = ~0U;
-
void DefinePointerSlot(int index, Zone* zone) { indexes_->Add(index,
zone); }
void DefinePointerRegister(Register reg, Zone* zone);
-
- Id id() const { return Id(id_); }
private:
- Safepoint(int id, ZoneList<int>* indexes, ZoneList<int>* registers)
- : id_(id), indexes_(indexes), registers_(registers) {}
- int id_;
+ Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
+ : indexes_(indexes), registers_(registers) {}
ZoneList<int>* indexes_;
ZoneList<int>* registers_;
@@ -234,11 +201,6 @@
void BumpLastLazySafepointIndex() {
last_lazy_safepoint_ = deopt_index_list_.length();
}
- void SetDeoptimizationPc(Safepoint::Id safepoint_id,
- unsigned deoptimization_pc) {
- deoptimization_info_[safepoint_id.id_].deoptimization_pc =
- deoptimization_pc;
- }
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
@@ -250,7 +212,6 @@
unsigned pc;
unsigned arguments;
bool has_doubles;
- unsigned deoptimization_pc;
};
uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
=======================================
--- /trunk/src/snapshot-source-sink.cc Tue Aug 5 00:05:55 2014 UTC
+++ /trunk/src/snapshot-source-sink.cc Tue Sep 16 07:50:38 2014 UTC
@@ -24,14 +24,10 @@
int32_t SnapshotByteSource::GetUnalignedInt() {
DCHECK(position_ < length_); // Require at least one byte left.
-#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN
- int32_t answer = *reinterpret_cast<const int32_t*>(data_ + position_);
-#else
int32_t answer = data_[position_];
answer |= data_[position_ + 1] << 8;
answer |= data_[position_ + 2] << 16;
answer |= data_[position_ + 3] << 24;
-#endif
return answer;
}
=======================================
--- /trunk/src/third_party/vtune/v8-vtune.h Fri Jan 17 08:10:36 2014 UTC
+++ /trunk/src/third_party/vtune/v8-vtune.h Tue Sep 16 07:50:38 2014 UTC
@@ -58,9 +58,11 @@
#ifndef V8_VTUNE_H_
#define V8_VTUNE_H_
+#include "../../../include/v8.h"
+
namespace vTune {
-void InitializeVtuneForV8();
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params);
} // namespace vTune
=======================================
--- /trunk/src/third_party/vtune/vtune-jit.cc Wed Jul 16 00:04:33 2014 UTC
+++ /trunk/src/third_party/vtune/vtune-jit.cc Tue Sep 16 07:50:38 2014 UTC
@@ -271,13 +271,10 @@
} // namespace internal
-void InitializeVtuneForV8() {
- if (v8::V8::Initialize()) {
- v8::V8::SetFlagsFromString("--nocompact_code_space",
- (int)strlen("--nocompact_code_space"));
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
- vTune::internal::VTUNEJITInterface::event_handler);
- }
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params) {
+ v8::V8::SetFlagsFromString("--nocompact_code_space",
+ (int)strlen("--nocompact_code_space"));
+ params.code_event_handler =
vTune::internal::VTUNEJITInterface::event_handler;
}
} // namespace vTune
=======================================
--- /trunk/src/utils.h Fri Sep 12 00:05:16 2014 UTC
+++ /trunk/src/utils.h Tue Sep 16 07:50:38 2014 UTC
@@ -680,20 +680,11 @@
const rchar* rhs,
int chars) {
const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*lhs) == sizeof(*rhs)) {
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); //
NOLINT
- while (lhs <= limit - kStepSize) {
- if (*reinterpret_cast<const uintptr_t*>(lhs) !=
- *reinterpret_cast<const uintptr_t*>(rhs)) {
- break;
- }
- lhs += kStepSize;
- rhs += kStepSize;
- }
+ if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
+ // memcmp compares byte-by-byte, yielding wrong results for two-byte
+ // strings on little-endian systems.
+ return memcmp(lhs, rhs, chars);
}
-#endif
while (lhs < limit) {
int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
if (r != 0) return r;
@@ -1286,15 +1277,11 @@
template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
if ((sizeof(*dest) == sizeof(*src)) &&
(chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
+ } else {
+ while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
}
}
=======================================
--- /trunk/src/version.cc Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/version.cc Tue Sep 16 07:50:38 2014 UTC
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 29
-#define BUILD_NUMBER 64
+#define BUILD_NUMBER 66
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/test/benchmarks/benchmarks.status Wed Aug 20 00:06:26 2014 UTC
+++ /trunk/test/benchmarks/benchmarks.status Tue Sep 16 07:50:38 2014 UTC
@@ -29,5 +29,7 @@
[ALWAYS, {
# Too slow in Debug mode.
'octane/mandreel': [PASS, ['mode == debug', SKIP]],
+ # TODO(turbofan): Too slow in debug mode for now.
+ 'octane/pdfjs': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
]
=======================================
--- /trunk/test/benchmarks/testcfg.py Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/test/benchmarks/testcfg.py Tue Sep 16 07:50:38 2014 UTC
@@ -186,8 +186,7 @@
# Both --nocrankshaft and --stressopt are very slow. Add TF but without
# always opt to match the way the benchmarks are run for performance
# testing.
- # TODO(mstarzinger): Enable typed pipeline soon!
- return [[], ["--turbo-filter=*", "--noturbo-types"]]
+ return [[], ["--turbo-filter=*"]]
def GetSuite(name, root):
=======================================
--- /trunk/test/cctest/cctest.status Thu Sep 11 00:05:22 2014 UTC
+++ /trunk/test/cctest/cctest.status Tue Sep 16 07:50:38 2014 UTC
@@ -83,12 +83,6 @@
# Scheduler cannot handle free-floating loops yet
'test-run-inlining/InlineLoop': [SKIP],
- # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
- 'test-debug/DebugEvaluateWithoutStack': [PASS, NO_VARIANTS],
- 'test-debug/MessageQueues': [PASS, NO_VARIANTS],
- 'test-debug/NestedBreakEventContextData': [PASS, NO_VARIANTS],
- 'test-debug/SendClientDataToHandler': [PASS, NO_VARIANTS],
-
# TODO(dcarney): C calls are broken all over the place.
'test-run-machops/RunCall*': [SKIP],
'test-run-machops/RunLoadImmIndex': [SKIP],
@@ -98,36 +92,6 @@
'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
'test-run-inlining/InlineTwiceDependentDiamondDifferent': [SKIP],
- # TODO(mstarzinger): Causes crash in generated code, needs investigation.
- 'test-api/FixedUint8Array': [PASS, NO_VARIANTS],
- 'test-api/FixedUint8ClampedArray': [PASS, NO_VARIANTS],
- 'test-api/FixedInt8Array': [PASS, NO_VARIANTS],
- 'test-api/FixedUint16Array': [PASS, NO_VARIANTS],
- 'test-api/FixedInt16Array': [PASS, NO_VARIANTS],
- 'test-api/FixedUint32Array': [PASS, NO_VARIANTS],
- 'test-api/FixedInt32Array': [PASS, NO_VARIANTS],
- 'test-api/FixedFloat32Array': [PASS, NO_VARIANTS],
- 'test-api/FixedFloat64Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalInt8Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalUint8Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalUint8ClampedArray': [PASS, NO_VARIANTS],
- 'test-api/ExternalInt16Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalUint32Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalUint16Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalInt32Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalFloat32Array': [PASS, NO_VARIANTS],
- 'test-api/ExternalArrays': [PASS, NO_VARIANTS],
- 'test-api/ExternalFloat64Array': [PASS, NO_VARIANTS],
- 'test-api/Uint8Array': [PASS, NO_VARIANTS],
- 'test-api/Int8Array': [PASS, NO_VARIANTS],
- 'test-api/Uint16Array': [PASS, NO_VARIANTS],
- 'test-api/Int16Array': [PASS, NO_VARIANTS],
- 'test-api/Uint32Array': [PASS, NO_VARIANTS],
- 'test-api/Int32Array': [PASS, NO_VARIANTS],
- 'test-api/Float64Array': [PASS, NO_VARIANTS],
- 'test-api/Float32Array': [PASS, NO_VARIANTS],
- 'test-api/Uint8ClampedArray': [PASS, NO_VARIANTS],
-
# Some tests are just too slow to run for now.
'test-api/Threading*': [PASS, NO_VARIANTS],
  'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
=======================================
--- /trunk/test/cctest/compiler/function-tester.h Fri Aug 29 00:04:38 2014 UTC
+++ /trunk/test/cctest/compiler/function-tester.h Tue Sep 16 07:50:38 2014 UTC
@@ -67,6 +67,9 @@
Pipeline pipeline(&info);
Handle<Code> code = pipeline.GenerateCode();
+ if (FLAG_turbo_deoptimization) {
+ info.context()->native_context()->AddOptimizedCode(*code);
+ }
CHECK(!code.is_null());
function->ReplaceCode(*code);
=======================================
--- /trunk/test/cctest/compiler/test-js-typed-lowering.cc Mon Sep 15
00:05:18 2014 UTC
+++ /trunk/test/cctest/compiler/test-js-typed-lowering.cc Tue Sep 16
07:50:38 2014 UTC
@@ -47,6 +47,24 @@
NodeProperties::SetBounds(n, Bounds(Type::None(), t));
return n;
}
+
+ Node* UndefinedConstant() {
+ Unique<Object> unique =
+        Unique<Object>::CreateImmovable(isolate->factory()->undefined_value());
+ return graph.NewNode(common.HeapConstant(unique));
+ }
+
+ Node* EmptyFrameState(Node* context) {
+ Node* parameters = graph.NewNode(common.StateValues(0));
+ Node* locals = graph.NewNode(common.StateValues(0));
+ Node* stack = graph.NewNode(common.StateValues(0));
+
+ Node* state_node =
+ graph.NewNode(common.FrameState(BailoutId(0), kIgnoreOutput),
+                      parameters, locals, stack, context, UndefinedConstant());
+
+ return state_node;
+ }
Node* reduce(Node* node) {
JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
@@ -775,12 +793,15 @@
TEST(RemoveToNumberEffects) {
+ FLAG_turbo_deoptimization = true;
+
JSTypedLoweringTester R;
Node* effect_use = NULL;
for (int i = 0; i < 10; i++) {
Node* p0 = R.Parameter(Type::Number());
Node* ton = R.Unop(R.javascript.ToNumber(), p0);
+ Node* frame_state = R.EmptyFrameState(R.context());
effect_use = NULL;
switch (i) {
@@ -796,11 +817,11 @@
        effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
        effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
- ton, R.start());
+ frame_state, ton, R.start());
break;
case 4:
        effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
- ton, R.start());
+ frame_state, ton, R.start());
break;
case 5:
        effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
=======================================
--- /trunk/test/cctest/test-api.cc Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/test/cctest/test-api.cc Tue Sep 16 07:50:38 2014 UTC
@@ -14171,19 +14171,14 @@
void SetFunctionEntryHookTest::RunTest() {
// Work in a new isolate throughout.
- v8::Isolate* isolate = v8::Isolate::New();
-
- // Test setting the entry hook on the new isolate.
- CHECK(v8::V8::SetFunctionEntryHook(isolate, EntryHook));
-
- // Replacing the hook, once set should fail.
- CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
+ v8::Isolate::CreateParams create_params;
+ create_params.entry_hook = EntryHook;
+ create_params.code_event_handler = JitEvent;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope scope(isolate);
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, JitEvent);
-
RunLoopInNewEnv(isolate);
    // Check the expected invocation counts.
@@ -14211,9 +14206,6 @@
// We should record no invocations in this isolate.
CHECK_EQ(0, static_cast<int>(invocations_.size()));
}
- // Since the isolate has been used, we shouldn't be able to set an entry
- // hook anymore.
- CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
isolate->Dispose();
}
@@ -14407,7 +14399,7 @@
saw_bar = 0;
move_events = 0;
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
+  isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
// Generate new code objects sparsely distributed across several
// different fragmented code-space pages.
@@ -14431,7 +14423,7 @@
// Force code movement.
heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
CHECK_LE(kIterations, saw_bar);
CHECK_LT(0, move_events);
@@ -14461,8 +14453,9 @@
i::HashMap lineinfo(MatchPointers);
jitcode_line_info = &lineinfo;
- V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
event_handler);
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
+ event_handler);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
jitcode_line_info = NULL;
  // We expect that we got some events. Note that if we could get code removal
=======================================
--- /trunk/test/mjsunit/mjsunit.status Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/test/mjsunit/mjsunit.status Tue Sep 16 07:50:38 2014 UTC
@@ -63,30 +63,20 @@
# from the deoptimizer to do that.
'arguments-indirect': [PASS, NO_VARIANTS],
- # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
- 'debug-references': [PASS, NO_VARIANTS],
- 'regress/regress-263': [PASS, NO_VARIANTS],
-
- # TODO(mstarzinger): Causes crash in generated code, needs investigation.
- 'array-sort': [PASS, NO_VARIANTS],
- 'dehoisted-array-index': [PASS, NO_VARIANTS],
-
- # TODO(mstarzinger/rossberg): Typer doesn't like contexts very much.
+ # TODO(rossberg): Typer doesn't like contexts very much.
'harmony/block-conflicts': [PASS, NO_VARIANTS],
'harmony/block-for': [PASS, NO_VARIANTS],
'harmony/block-leave': [PASS, NO_VARIANTS],
'harmony/block-let-crankshaft': [PASS, NO_VARIANTS],
'harmony/empty-for': [PASS, NO_VARIANTS],
- # TODO(mstarzinger): Needs more investigation.
- 'polymorph-arrays': [PASS, NO_VARIANTS],
- 'string-oom-concat': [PASS, NO_VARIANTS],
- 'unbox-double-arrays': [PASS, NO_VARIANTS],
- 'compiler/osr-warm': [PASS, NO_VARIANTS],
- 'compiler/regress-3136962': [PASS, NO_VARIANTS],
- 'harmony/proxies-json': [PASS, NO_VARIANTS],
+ # TODO(mstarzinger): Typed lowering screws up valueOf vs. toString order.
'regress/regress-760-1': [PASS, NO_VARIANTS],
'regress/regress-760-2': [PASS, NO_VARIANTS],
+ 'harmony/proxies-json': [PASS, NO_VARIANTS],
+
+ # Unused code is optimized away, think about fixing the test case.
+ 'string-oom-concat': [PASS, NO_VARIANTS],
'regress/regress-crbug-357052': [PASS, NO_VARIANTS],
# Some tests are over-restrictive about object layout.
@@ -98,6 +88,7 @@
'bit-not': [PASS, NO_VARIANTS],
'json2': [PASS, NO_VARIANTS],
'packed-elements': [PASS, NO_VARIANTS],
+ 'unbox-double-arrays': [PASS, NO_VARIANTS],
'whitespaces': [PASS, NO_VARIANTS],
'compiler/osr-assert': [PASS, NO_VARIANTS],
'regress/regress-2185-2': [PASS, NO_VARIANTS],
@@ -250,6 +241,8 @@
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
+ 'compiler/osr-warm': [PASS, NO_VARIANTS],
+ 'regress/regress-91008': [PASS, NO_VARIANTS],
}], # 'gc_stress == True'
##############################################################################
=======================================
--- /trunk/test/mjsunit/regress/poly_count_operation.js Mon Jul 22 09:21:20
2013 UTC
+++ /trunk/test/mjsunit/regress/poly_count_operation.js Tue Sep 16 07:50:38
2014 UTC
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
var o1 = {x:1};
var o2 = {};
=======================================
***Additional files exist in this changeset.***
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.