Author: Richard Plangger <[email protected]>
Branch: vecopt
Changeset: r77158:b2baaa7fb44d
Date: 2015-05-06 10:02 +0200
http://bitbucket.org/pypy/pypy/changeset/b2baaa7fb44d/
Log: test_zjit up and running again (extended the fake space) enabled
vectorization algorithm for test_zjit relaxing a guard does now copy
its operation (if vecopt fails it does not leave dirty state behind)
diff --git a/pypy/module/micronumpy/compile.py
b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -112,11 +112,9 @@
def getattr(self, w_obj, w_attr):
assert isinstance(w_attr, StringObject)
- if isinstance(w_obj, boxes.W_GenericBox):
- assert False
- raise OperationError(self.w_AttributeError, self.wrap('aa'))
- assert isinstance(w_obj, DictObject)
- return w_obj.getdictvalue(self, w_attr)
+ if isinstance(w_obj, DictObject):
+ return w_obj.getdictvalue(self, w_attr)
+ return None
def isinstance_w(self, w_obj, w_tp):
try:
diff --git a/pypy/module/micronumpy/test/test_zjit.py
b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -65,7 +65,8 @@
listops=True,
listcomp=True,
backendopt=True,
- graph_and_interp_only=True)
+ graph_and_interp_only=True,
+ vectorize=True)
self.__class__.interp = interp
self.__class__.graph = graph
@@ -85,11 +86,6 @@
def test_add(self):
result = self.run("add")
- py.test.skip("don't run for now")
- self.check_simple_loop({'raw_load': 2, 'float_add': 1,
- 'raw_store': 1, 'int_add': 1,
- 'int_ge': 1, 'guard_false': 1, 'jump': 1,
- 'arraylen_gc': 1})
assert result == 3 + 3
def define_float_add():
diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py
b/rpython/jit/metainterp/optimizeopt/dependency.py
--- a/rpython/jit/metainterp/optimizeopt/dependency.py
+++ b/rpython/jit/metainterp/optimizeopt/dependency.py
@@ -112,7 +112,11 @@
def relax_guard_to(self, guard):
""" Relaxes a guard operation to an earlier guard. """
- tgt_op = self.getoperation()
+ # clone this operation object. if the vectorizer is
+ # not able to relax guards, it won't leave behind a modified operation
+ tgt_op = self.getoperation().clone()
+ op = tgt_op
+
op = guard.getoperation()
assert isinstance(tgt_op, GuardResOp)
assert isinstance(op, GuardResOp)
@@ -541,6 +545,8 @@
# handle fail args
if guard_op.getfailargs():
for arg in guard_op.getfailargs():
+ if arg is None:
+ continue
try:
for at in tracker.redefinitions(arg):
# later redefinitions are prohibited
@@ -717,7 +723,8 @@
var = self.index_vars[arg] = IndexVar(arg)
return var
- def operation_INT_LT(self, op, node):
+ bool_func_source = """
+ def operation_{name}(self, op, node):
box_a0 = op.getarg(0)
box_a1 = op.getarg(1)
left = None
@@ -727,7 +734,13 @@
if not self.is_const_integral(box_a1):
right = self.get_or_create(box_a1)
box_r = op.result
- self.comparison_vars[box_r] = IndexGuard(op.getopnum(), left, right)
+ self.comparison_vars[box_r] = CompareOperation(op.getopnum(), left,
right)
+ """
+ for name in ['INT_LT', 'INT_LE', 'INT_EQ', 'INT_NE', 'INT_NE',
+ 'INT_GT', 'INT_GE', 'UINT_LT', 'UINT_LE', 'UINT_GT',
+ 'UINT_GE']:
+ exec py.code.Source(bool_func_source.format(name=name)).compile()
+ del bool_func_source
additive_func_source = """
def operation_{name}(self, op, node):
@@ -809,7 +822,7 @@
IntegralForwardModification.inspect_operation = integral_dispatch_opt
del integral_dispatch_opt
-class IndexGuard(object):
+class CompareOperation(object):
def __init__(self, opnum, lindex_var, rindex_var):
self.opnum = opnum
self.lindex_var = lindex_var
diff --git a/rpython/jit/metainterp/optimizeopt/vectorize.py
b/rpython/jit/metainterp/optimizeopt/vectorize.py
--- a/rpython/jit/metainterp/optimizeopt/vectorize.py
+++ b/rpython/jit/metainterp/optimizeopt/vectorize.py
@@ -46,10 +46,12 @@
inline_short_preamble, start_state):
optimize_unroll(metainterp_sd, jitdriver_sd, loop, optimizations,
inline_short_preamble, start_state, False)
+ orig_ops = loop.operations
try:
opt = VectorizingOptimizer(metainterp_sd, jitdriver_sd, loop,
optimizations)
opt.propagate_all_forward()
except NotAVectorizeableLoop:
+ loop.operations = orig_ops
# vectorization is not possible, propagate only normal optimizations
pass
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit