Author: Ronan Lamy <[email protected]>
Branch: 
Changeset: r89709:f585a82772af
Date: 2017-01-23 22:37 +0000
http://bitbucket.org/pypy/pypy/changeset/f585a82772af/

Log:    Backport py3.5 asdl.py

diff too long, truncating to 2000 out of 2152 lines

diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -1,8 +1,6 @@
 # Generated by tools/asdl_py.py
 from rpython.tool.pairtype import extendabletype
 from rpython.tool.sourcetools import func_with_new_name
-from rpython.rlib.signature import signature, finishsigs
-from rpython.rlib import types
 
 from pypy.interpreter import typedef
 from pypy.interpreter.baseobjspace import W_Root
@@ -10,10 +8,9 @@
 from pypy.interpreter.gateway import interp2app
 
 
-def raise_attriberr(space, w_obj, name):
-    raise oefmt(space.w_AttributeError,
-                "'%T' object has no attribute '%s'", w_obj, name)
-
+def raise_required_value(space, w_obj, name):
+    raise oefmt(space.w_ValueError,
+                "field %s is required for %T", name, w_obj)
 
 def check_string(space, w_obj):
     if not (space.isinstance_w(w_obj, space.w_str) or
@@ -34,7 +31,6 @@
 
 class AST(object):
     __metaclass__ = extendabletype
-    _attrs_ = ['lineno', 'col_offset']
 
     def walkabout(self, visitor):
         raise AssertionError("walkabout() implementation not provided")
@@ -266,6 +262,8 @@
     def from_object(space, w_node):
         w_body = get_field(space, w_node, 'body', False)
         _body = expr.from_object(space, w_body)
+        if _body is None:
+            raise_required_value(space, w_node, 'body')
         return Expression(_body)
 
 State.ast_type('Expression', 'mod', ['body'])
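
The pattern above repeats throughout this file: wherever the ASDL declares a
field as required, the generated from_object converter now rejects a None
result with a ValueError instead of letting it propagate into the compiler.
A hypothetical session showing the user-visible effect (the message format
comes from raise_required_value above; the exact REPL output is an assumption):

    >>> import ast
    >>> e = ast.Expression()
    >>> e.body = None                    # required field explicitly set to None
    >>> compile(e, '<test>', 'eval')
    ValueError: field body is required for Expression
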
@@ -417,7 +415,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _name = space.realstr_w(w_name)
+        if _name is None:
+            raise_required_value(space, w_node, 'name')
         _args = arguments.from_object(space, w_args)
+        if _args is None:
+            raise_required_value(space, w_node, 'args')
         body_w = space.unpackiterable(w_body)
         _body = [stmt.from_object(space, w_item) for w_item in body_w]
         decorator_list_w = space.unpackiterable(w_decorator_list)
@@ -487,6 +489,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _name = space.realstr_w(w_name)
+        if _name is None:
+            raise_required_value(space, w_node, 'name')
         bases_w = space.unpackiterable(w_bases)
         _bases = [expr.from_object(space, w_item) for w_item in bases_w]
         body_w = space.unpackiterable(w_body)
@@ -620,6 +624,8 @@
         targets_w = space.unpackiterable(w_targets)
         _targets = [expr.from_object(space, w_item) for w_item in targets_w]
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Assign(_targets, _value, _lineno, _col_offset)
@@ -665,8 +671,14 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _target = expr.from_object(space, w_target)
+        if _target is None:
+            raise_required_value(space, w_node, 'target')
         _op = operator.from_object(space, w_op)
+        if _op is None:
+            raise_required_value(space, w_node, 'op')
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return AugAssign(_target, _op, _value, _lineno, _col_offset)
@@ -721,6 +733,8 @@
         values_w = space.unpackiterable(w_values)
         _values = [expr.from_object(space, w_item) for w_item in values_w]
         _nl = space.bool_w(w_nl)
+        if _nl is None:
+            raise_required_value(space, w_node, 'nl')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Print(_dest, _values, _nl, _lineno, _col_offset)
@@ -782,7 +796,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _target = expr.from_object(space, w_target)
+        if _target is None:
+            raise_required_value(space, w_node, 'target')
         _iter = expr.from_object(space, w_iter)
+        if _iter is None:
+            raise_required_value(space, w_node, 'iter')
         body_w = space.unpackiterable(w_body)
         _body = [stmt.from_object(space, w_item) for w_item in body_w]
         orelse_w = space.unpackiterable(w_orelse)
@@ -843,6 +861,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _test = expr.from_object(space, w_test)
+        if _test is None:
+            raise_required_value(space, w_node, 'test')
         body_w = space.unpackiterable(w_body)
         _body = [stmt.from_object(space, w_item) for w_item in body_w]
         orelse_w = space.unpackiterable(w_orelse)
@@ -903,6 +923,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _test = expr.from_object(space, w_test)
+        if _test is None:
+            raise_required_value(space, w_node, 'test')
         body_w = space.unpackiterable(w_body)
         _body = [stmt.from_object(space, w_item) for w_item in body_w]
         orelse_w = space.unpackiterable(w_orelse)
@@ -959,6 +981,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _context_expr = expr.from_object(space, w_context_expr)
+        if _context_expr is None:
+            raise_required_value(space, w_node, 'context_expr')
         _optional_vars = expr.from_object(space, w_optional_vars)
         body_w = space.unpackiterable(w_body)
         _body = [stmt.from_object(space, w_item) for w_item in body_w]
@@ -1175,6 +1199,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _test = expr.from_object(space, w_test)
+        if _test is None:
+            raise_required_value(space, w_node, 'test')
         _msg = expr.from_object(space, w_msg)
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
@@ -1318,6 +1344,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _body = expr.from_object(space, w_body)
+        if _body is None:
+            raise_required_value(space, w_node, 'body')
         _globals = expr.from_object(space, w_globals)
         _locals = expr.from_object(space, w_locals)
         _lineno = space.int_w(w_lineno)
@@ -1396,6 +1424,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Expr(_value, _lineno, _col_offset)
@@ -1591,6 +1621,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _op = boolop.from_object(space, w_op)
+        if _op is None:
+            raise_required_value(space, w_node, 'op')
         values_w = space.unpackiterable(w_values)
         _values = [expr.from_object(space, w_item) for w_item in values_w]
         _lineno = space.int_w(w_lineno)
@@ -1638,8 +1670,14 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _left = expr.from_object(space, w_left)
+        if _left is None:
+            raise_required_value(space, w_node, 'left')
         _op = operator.from_object(space, w_op)
+        if _op is None:
+            raise_required_value(space, w_node, 'op')
         _right = expr.from_object(space, w_right)
+        if _right is None:
+            raise_required_value(space, w_node, 'right')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return BinOp(_left, _op, _right, _lineno, _col_offset)
@@ -1680,7 +1718,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _op = unaryop.from_object(space, w_op)
+        if _op is None:
+            raise_required_value(space, w_node, 'op')
         _operand = expr.from_object(space, w_operand)
+        if _operand is None:
+            raise_required_value(space, w_node, 'operand')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return UnaryOp(_op, _operand, _lineno, _col_offset)
@@ -1722,7 +1764,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _args = arguments.from_object(space, w_args)
+        if _args is None:
+            raise_required_value(space, w_node, 'args')
         _body = expr.from_object(space, w_body)
+        if _body is None:
+            raise_required_value(space, w_node, 'body')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Lambda(_args, _body, _lineno, _col_offset)
@@ -1769,8 +1815,14 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _test = expr.from_object(space, w_test)
+        if _test is None:
+            raise_required_value(space, w_node, 'test')
         _body = expr.from_object(space, w_body)
+        if _body is None:
+            raise_required_value(space, w_node, 'body')
         _orelse = expr.from_object(space, w_orelse)
+        if _orelse is None:
+            raise_required_value(space, w_node, 'orelse')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return IfExp(_test, _body, _orelse, _lineno, _col_offset)
@@ -1886,8 +1938,8 @@
 
     def mutate_over(self, visitor):
         self.elt = self.elt.mutate_over(visitor)
-        for i in range(len(self.generators)):
-            self.generators[i] = self.generators[i].mutate_over(visitor)
+        if self.generators:
+            visitor._mutate_sequence(self.generators)
         return visitor.visit_ListComp(self)
 
     def to_object(self, space):
@@ -1913,6 +1965,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _elt = expr.from_object(space, w_elt)
+        if _elt is None:
+            raise_required_value(space, w_node, 'elt')
         generators_w = space.unpackiterable(w_generators)
         _generators = [comprehension.from_object(space, w_item) for w_item in generators_w]
         _lineno = space.int_w(w_lineno)
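
The comprehension classes also swap their open-coded index loop in
mutate_over for a call to the visitor's _mutate_sequence helper, guarded by
"if self.generators:" so that empty lists skip the call. The helper itself
lives in a part of ast.py outside this truncated diff; a minimal sketch of
what it presumably does, mirroring the loop being removed:

    def _mutate_sequence(self, seq):
        # Replace each node of the list in place via its mutate_over hook.
        for i in range(len(seq)):
            seq[i] = seq[i].mutate_over(self)
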
@@ -1934,8 +1988,8 @@
 
     def mutate_over(self, visitor):
         self.elt = self.elt.mutate_over(visitor)
-        for i in range(len(self.generators)):
-            self.generators[i] = self.generators[i].mutate_over(visitor)
+        if self.generators:
+            visitor._mutate_sequence(self.generators)
         return visitor.visit_SetComp(self)
 
     def to_object(self, space):
@@ -1961,6 +2015,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _elt = expr.from_object(space, w_elt)
+        if _elt is None:
+            raise_required_value(space, w_node, 'elt')
         generators_w = space.unpackiterable(w_generators)
         _generators = [comprehension.from_object(space, w_item) for w_item in generators_w]
         _lineno = space.int_w(w_lineno)
@@ -1984,8 +2040,8 @@
     def mutate_over(self, visitor):
         self.key = self.key.mutate_over(visitor)
         self.value = self.value.mutate_over(visitor)
-        for i in range(len(self.generators)):
-            self.generators[i] = self.generators[i].mutate_over(visitor)
+        if self.generators:
+            visitor._mutate_sequence(self.generators)
         return visitor.visit_DictComp(self)
 
     def to_object(self, space):
@@ -2014,7 +2070,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _key = expr.from_object(space, w_key)
+        if _key is None:
+            raise_required_value(space, w_node, 'key')
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         generators_w = space.unpackiterable(w_generators)
         _generators = [comprehension.from_object(space, w_item) for w_item in generators_w]
         _lineno = space.int_w(w_lineno)
@@ -2036,8 +2096,8 @@
 
     def mutate_over(self, visitor):
         self.elt = self.elt.mutate_over(visitor)
-        for i in range(len(self.generators)):
-            self.generators[i] = self.generators[i].mutate_over(visitor)
+        if self.generators:
+            visitor._mutate_sequence(self.generators)
         return visitor.visit_GeneratorExp(self)
 
     def to_object(self, space):
@@ -2063,6 +2123,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _elt = expr.from_object(space, w_elt)
+        if _elt is None:
+            raise_required_value(space, w_node, 'elt')
         generators_w = space.unpackiterable(w_generators)
         _generators = [comprehension.from_object(space, w_item) for w_item in generators_w]
         _lineno = space.int_w(w_lineno)
@@ -2156,6 +2218,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _left = expr.from_object(space, w_left)
+        if _left is None:
+            raise_required_value(space, w_node, 'left')
         ops_w = space.unpackiterable(w_ops)
         _ops = [cmpop.from_object(space, w_item) for w_item in ops_w]
         comparators_w = space.unpackiterable(w_comparators)
@@ -2228,6 +2292,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _func = expr.from_object(space, w_func)
+        if _func is None:
+            raise_required_value(space, w_node, 'func')
         args_w = space.unpackiterable(w_args)
         _args = [expr.from_object(space, w_item) for w_item in args_w]
         keywords_w = space.unpackiterable(w_keywords)
@@ -2270,6 +2336,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Repr(_value, _lineno, _col_offset)
@@ -2305,6 +2373,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _n = w_n
+        if _n is None:
+            raise_required_value(space, w_node, 'n')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Num(_n, _lineno, _col_offset)
@@ -2340,6 +2410,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _s = check_string(space, w_s)
+        if _s is None:
+            raise_required_value(space, w_node, 's')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Str(_s, _lineno, _col_offset)
@@ -2384,8 +2456,14 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _attr = space.realstr_w(w_attr)
+        if _attr is None:
+            raise_required_value(space, w_node, 'attr')
         _ctx = expr_context.from_object(space, w_ctx)
+        if _ctx is None:
+            raise_required_value(space, w_node, 'ctx')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Attribute(_value, _attr, _ctx, _lineno, _col_offset)
@@ -2431,8 +2509,14 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _slice = slice.from_object(space, w_slice)
+        if _slice is None:
+            raise_required_value(space, w_node, 'slice')
         _ctx = expr_context.from_object(space, w_ctx)
+        if _ctx is None:
+            raise_required_value(space, w_node, 'ctx')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Subscript(_value, _slice, _ctx, _lineno, _col_offset)
@@ -2472,7 +2556,11 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _id = space.realstr_w(w_id)
+        if _id is None:
+            raise_required_value(space, w_node, 'id')
         _ctx = expr_context.from_object(space, w_ctx)
+        if _ctx is None:
+            raise_required_value(space, w_node, 'ctx')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Name(_id, _ctx, _lineno, _col_offset)
@@ -2520,6 +2608,8 @@
         elts_w = space.unpackiterable(w_elts)
         _elts = [expr.from_object(space, w_item) for w_item in elts_w]
         _ctx = expr_context.from_object(space, w_ctx)
+        if _ctx is None:
+            raise_required_value(space, w_node, 'ctx')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return List(_elts, _ctx, _lineno, _col_offset)
@@ -2567,6 +2657,8 @@
         elts_w = space.unpackiterable(w_elts)
         _elts = [expr.from_object(space, w_item) for w_item in elts_w]
         _ctx = expr_context.from_object(space, w_ctx)
+        if _ctx is None:
+            raise_required_value(space, w_node, 'ctx')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Tuple(_elts, _ctx, _lineno, _col_offset)
@@ -2602,6 +2694,8 @@
         w_lineno = get_field(space, w_node, 'lineno', False)
         w_col_offset = get_field(space, w_node, 'col_offset', False)
         _value = w_value
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         _lineno = space.int_w(w_lineno)
         _col_offset = space.int_w(w_col_offset)
         return Const(_value, _lineno, _col_offset)
@@ -2808,6 +2902,8 @@
     def from_object(space, w_node):
         w_value = get_field(space, w_node, 'value', False)
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         return Index(_value)
 
 State.ast_type('Index', 'slice', ['value'])
@@ -3109,7 +3205,6 @@
     _NotIn,
 ]
 
-@finishsigs
 class comprehension(AST):
 
     def __init__(self, target, iter, ifs):
@@ -3117,7 +3212,6 @@
         self.iter = iter
         self.ifs = ifs
 
-    @signature(types.self(), types.any(), returns=types.self())
     def mutate_over(self, visitor):
         self.target = self.target.mutate_over(visitor)
         self.iter = self.iter.mutate_over(visitor)
@@ -3148,7 +3242,11 @@
         w_iter = get_field(space, w_node, 'iter', False)
         w_ifs = get_field(space, w_node, 'ifs', False)
         _target = expr.from_object(space, w_target)
+        if _target is None:
+            raise_required_value(space, w_node, 'target')
         _iter = expr.from_object(space, w_iter)
+        if _iter is None:
+            raise_required_value(space, w_node, 'iter')
         ifs_w = space.unpackiterable(w_ifs)
         _ifs = [expr.from_object(space, w_item) for w_item in ifs_w]
         return comprehension(_target, _iter, _ifs)
@@ -3307,7 +3405,11 @@
         w_arg = get_field(space, w_node, 'arg', False)
         w_value = get_field(space, w_node, 'value', False)
         _arg = space.realstr_w(w_arg)
+        if _arg is None:
+            raise_required_value(space, w_node, 'arg')
         _value = expr.from_object(space, w_value)
+        if _value is None:
+            raise_required_value(space, w_node, 'value')
         return keyword(_arg, _value)
 
 State.ast_type('keyword', 'AST', ['arg', 'value'])
@@ -3337,6 +3439,8 @@
         w_name = get_field(space, w_node, 'name', False)
         w_asname = get_field(space, w_node, 'asname', True)
         _name = space.realstr_w(w_name)
+        if _name is None:
+            raise_required_value(space, w_node, 'name')
         _asname = space.str_or_None_w(w_asname)
         return alias(_name, _asname)
 
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl
--- a/pypy/interpreter/astcompiler/tools/Python.asdl
+++ b/pypy/interpreter/astcompiler/tools/Python.asdl
@@ -1,6 +1,6 @@
 -- ASDL's five builtin types are identifier, int, string, object, bool
 
-module Python version "$Revision: 43614 $"
+module Python
 {
        mod = Module(stmt* body)
            | Interactive(stmt* body)
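
The only change to Python.asdl itself is dropping the version clause, which
the rewritten grammar below no longer accepts (a module is now just
"module" Id "{" definitions "}"). The new grammar does, however, allow an
attributes clause after a product. An illustrative round-trip through the
new parser (the module and type names here are invented):

    from asdl import ASDLParser        # the rewritten module below

    src = 'module M { point = (int x, int y) attributes (int lineno) }'
    mod = ASDLParser().parse(src)
    print(mod.types['point'].attributes)   # [Field(int, lineno)]
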
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -1,243 +1,53 @@
-"""An implementation of the Zephyr Abstract Syntax Definition Language.
+#-------------------------------------------------------------------------------
+# Parser for ASDL [1] definition files. Reads in an ASDL description and parses
+# it into an AST that describes it.
+#
+# The EBNF we're parsing here: Figure 1 of the paper [1]. Extended to support
+# modules and attributes after a product. Words starting with Capital letters
+# are terminals. Literal tokens are in "double quotes". Others are
+# non-terminals. Id is either TokenId or ConstructorId.
+#
+# module        ::= "module" Id "{" [definitions] "}"
+# definitions   ::= { TypeId "=" type }
+# type          ::= product | sum
+# product       ::= fields ["attributes" fields]
+# fields        ::= "(" { field, "," } field ")"
+# field         ::= TypeId ["?" | "*"] [Id]
+# sum           ::= constructor { "|" constructor } ["attributes" fields]
+# constructor   ::= ConstructorId [fields]
+#
+# [1] "The Zephyr Abstract Syntax Description Language" by Wang, et. al. See
+#     http://asdl.sourceforge.net/
+#-------------------------------------------------------------------------------
+from collections import namedtuple
+import re
 
-See http://asdl.sourceforge.net/ and
-http://www.cs.princeton.edu/~danwang/Papers/dsl97/dsl97-abstract.html.
+__all__ = [
+    'builtin_types', 'parse', 'AST', 'Module', 'Type', 'Constructor',
+    'Field', 'Sum', 'Product', 'VisitorBase', 'Check', 'check']
 
-Only supports top level module decl, not view.  I'm guessing that view
-is intended to support the browser and I'm not interested in the
-browser.
+# The following classes define nodes into which the ASDL description is parsed.
+# Note: this is a "meta-AST". ASDL files (such as Python.asdl) describe the AST
+# structure used by a programming language. But ASDL files themselves need to be
+# parsed. This module parses ASDL files and uses a simple AST to represent them.
+# See the EBNF at the top of the file to understand the logical connection
+# between the various node types.
 
-Changes for Python: Add support for module versions
-"""
+builtin_types = {'identifier', 'string', 'bytes', 'int', 'bool', 'object',
+                 'singleton'}
 
-import os
-import traceback
+class AST:
+    def __repr__(self):
+        raise NotImplementedError
 
-import spark
-
-class Token(object):
-    # spark seems to dispatch in the parser based on a token's
-    # type attribute
-    def __init__(self, type, lineno):
-        self.type = type
-        self.lineno = lineno
-
-    def __str__(self):
-        return self.type
+class Module(AST):
+    def __init__(self, name, dfns):
+        self.name = name
+        self.dfns = dfns
+        self.types = {type.name: type.value for type in dfns}
 
     def __repr__(self):
-        return str(self)
-
-class Id(Token):
-    def __init__(self, value, lineno):
-        self.type = 'Id'
-        self.value = value
-        self.lineno = lineno
-
-    def __str__(self):
-        return self.value
-
-class String(Token):
-    def __init__(self, value, lineno):
-        self.type = 'String'
-        self.value = value
-        self.lineno = lineno
-
-class ASDLSyntaxError(Exception):
-
-    def __init__(self, lineno, token=None, msg=None):
-        self.lineno = lineno
-        self.token = token
-        self.msg = msg
-
-    def __str__(self):
-        if self.msg is None:
-            return "Error at '%s', line %d" % (self.token, self.lineno)
-        else:
-            return "%s, line %d" % (self.msg, self.lineno)
-
-class ASDLScanner(spark.GenericScanner, object):
-
-    def tokenize(self, input):
-        self.rv = []
-        self.lineno = 1
-        super(ASDLScanner, self).tokenize(input)
-        return self.rv
-
-    def t_id(self, s):
-        r"[\w\.]+"
-        # XXX doesn't distinguish upper vs. lower, which is
-        # significant for ASDL.
-        self.rv.append(Id(s, self.lineno))
-
-    def t_string(self, s):
-        r'"[^"]*"'
-        self.rv.append(String(s, self.lineno))
-
-    def t_xxx(self, s): # not sure what this production means
-        r"<="
-        self.rv.append(Token(s, self.lineno))
-
-    def t_punctuation(self, s):
-        r"[\{\}\*\=\|\(\)\,\?\:]"
-        self.rv.append(Token(s, self.lineno))
-
-    def t_comment(self, s):
-        r"\-\-[^\n]*"
-        pass
-
-    def t_newline(self, s):
-        r"\n"
-        self.lineno += 1
-
-    def t_whitespace(self, s):
-        r"[ \t]+"
-        pass
-
-    def t_default(self, s):
-        r" . +"
-        raise ValueError("unmatched input: %s" % `s`)
-
-class ASDLParser(spark.GenericParser, object):
-    def __init__(self):
-        super(ASDLParser, self).__init__("module")
-
-    def typestring(self, tok):
-        return tok.type
-
-    def error(self, tok):
-        raise ASDLSyntaxError(tok.lineno, tok)
-
-    def p_module_0(self, (module, name, version, _0, _1)):
-        " module ::= Id Id version { } "
-        if module.value != "module":
-            raise ASDLSyntaxError(module.lineno,
-                                  msg="expected 'module', found %s" % module)
-        return Module(name, None, version)
-
-    def p_module(self, (module, name, version, _0, definitions, _1)):
-        " module ::= Id Id version { definitions } "
-        if module.value != "module":
-            raise ASDLSyntaxError(module.lineno,
-                                  msg="expected 'module', found %s" % module)
-        return Module(name, definitions, version)
-
-    def p_version(self, (version, V)):
-        "version ::= Id String"
-        if version.value != "version":
-            raise ASDLSyntaxError(version.lineno,
-                                msg="expected 'version', found %" % version)
-        return V
-
-    def p_definition_0(self, (definition,)):
-        " definitions ::= definition "
-        return definition
-
-    def p_definition_1(self, (definitions, definition)):
-        " definitions ::= definition definitions "
-        return definitions + definition
-
-    def p_definition(self, (id, _, type)):
-        " definition ::= Id = type "
-        return [Type(id, type)]
-
-    def p_type_0(self, (product,)):
-        " type ::= product "
-        return product
-
-    def p_type_1(self, (sum,)):
-        " type ::= sum "
-        return Sum(sum)
-
-    def p_type_2(self, (sum, id, _0, attributes, _1)):
-        " type ::= sum Id ( fields ) "
-        if id.value != "attributes":
-            raise ASDLSyntaxError(id.lineno,
-                                  msg="expected attributes, found %s" % id)
-        if attributes:
-            attributes.reverse()
-        return Sum(sum, attributes)
-
-    def p_product(self, (_0, fields, _1)):
-        " product ::= ( fields ) "
-        # XXX can't I just construct things in the right order?
-        fields.reverse()
-        return Product(fields)
-
-    def p_sum_0(self, (constructor,)):
-        " sum ::= constructor "
-        return [constructor]
-
-    def p_sum_1(self, (constructor, _, sum)):
-        " sum ::= constructor | sum "
-        return [constructor] + sum
-
-    def p_sum_2(self, (constructor, _, sum)):
-        " sum ::= constructor | sum "
-        return [constructor] + sum
-
-    def p_constructor_0(self, (id,)):
-        " constructor ::= Id "
-        return Constructor(id)
-
-    def p_constructor_1(self, (id, _0, fields, _1)):
-        " constructor ::= Id ( fields ) "
-        # XXX can't I just construct things in the right order?
-        fields.reverse()
-        return Constructor(id, fields)
-
-    def p_fields_0(self, (field,)):
-        " fields ::= field "
-        return [field]
-
-    def p_fields_1(self, (field, _, fields)):
-        " fields ::= field , fields "
-        return fields + [field]
-
-    def p_field_0(self, (type,)):
-        " field ::= Id "
-        return Field(type)
-
-    def p_field_1(self, (type, name)):
-        " field ::= Id Id "
-        return Field(type, name)
-
-    def p_field_2(self, (type, _, name)):
-        " field ::= Id * Id "
-        return Field(type, name, seq=True)
-
-    def p_field_3(self, (type, _, name)):
-        " field ::= Id ? Id "
-        return Field(type, name, opt=True)
-
-    def p_field_4(self, (type, _)):
-        " field ::= Id * "
-        return Field(type, seq=True)
-
-    def p_field_5(self, (type, _)):
-        " field ::= Id ? "
-        return Field(type, opt=True)
-
-builtin_types = ("identifier", "string", "int", "bool", "object")
-
-# below is a collection of classes to capture the AST of an AST :-)
-# not sure if any of the methods are useful yet, but I'm adding them
-# piecemeal as they seem helpful
-
-class AST(object):
-    pass # a marker class
-
-class Module(AST):
-    def __init__(self, name, dfns, version):
-        self.name = name
-        self.dfns = dfns
-        self.version = version
-        self.types = {} # maps type name to value (from dfns)
-        for type in dfns:
-            self.types[type.name.value] = type.value
-
-    def __repr__(self):
-        return "Module(%s, %s)" % (self.name, self.dfns)
+        return 'Module({0.name}, {0.dfns})'.format(self)
 
 class Type(AST):
     def __init__(self, name, value):
@@ -245,7 +55,7 @@
         self.value = value
 
     def __repr__(self):
-        return "Type(%s, %s)" % (self.name, self.value)
+        return 'Type({0.name}, {0.value})'.format(self)
 
 class Constructor(AST):
     def __init__(self, name, fields=None):
@@ -253,7 +63,7 @@
         self.fields = fields or []
 
     def __repr__(self):
-        return "Constructor(%s, %s)" % (self.name, self.fields)
+        return 'Constructor({0.name}, {0.fields})'.format(self)
 
 class Field(AST):
     def __init__(self, type, name=None, seq=False, opt=False):
@@ -270,9 +80,9 @@
         else:
             extra = ""
         if self.name is None:
-            return "Field(%s%s)" % (self.type, extra)
+            return 'Field({0.type}{1})'.format(self, extra)
         else:
-            return "Field(%s, %s%s)" % (self.type, self.name, extra)
+            return 'Field({0.type}, {0.name}{1})'.format(self, extra)
 
 class Sum(AST):
     def __init__(self, types, attributes=None):
@@ -280,47 +90,54 @@
         self.attributes = attributes or []
 
     def __repr__(self):
-        if self.attributes is None:
-            return "Sum(%s)" % self.types
+        if self.attributes:
+            return 'Sum({0.types}, {0.attributes})'.format(self)
         else:
-            return "Sum(%s, %s)" % (self.types, self.attributes)
+            return 'Sum({0.types})'.format(self)
 
 class Product(AST):
-    def __init__(self, fields):
+    def __init__(self, fields, attributes=None):
         self.fields = fields
+        self.attributes = attributes or []
 
     def __repr__(self):
-        return "Product(%s)" % self.fields
+        if self.attributes:
+            return 'Product({0.fields}, {0.attributes})'.format(self)
+        else:
+            return 'Product({0.fields})'.format(self)
+
+# A generic visitor for the meta-AST that describes ASDL. This can be used by
+# emitters. Note that this visitor does not provide a generic visit method, so a
+# subclass needs to define visit methods from visitModule to as deep as the
+# interesting node.
+# We also define a Check visitor that makes sure the parsed ASDL is well-formed.
 
 class VisitorBase(object):
+    """Generic tree visitor for ASTs."""
+    def __init__(self):
+        self.cache = {}
 
-    def __init__(self, skip=False):
-        self.cache = {}
-        self.skip = skip
-
-    def visit(self, object, *args):
-        meth = self._dispatch(object)
-        if meth is None:
-            return
-        meth(object, *args)
-
-    def _dispatch(self, object):
-        assert isinstance(object, AST), repr(object)
-        klass = object.__class__
+    def visit(self, obj, *args):
+        klass = obj.__class__
         meth = self.cache.get(klass)
         if meth is None:
             methname = "visit" + klass.__name__
-            if self.skip:
-                meth = getattr(self, methname, None)
-            else:
-                meth = getattr(self, methname)
+            meth = getattr(self, methname, None)
             self.cache[klass] = meth
-        return meth
+        if meth:
+            try:
+                meth(obj, *args)
+            except Exception as e:
+                print("Error visiting %r: %s" % (obj, e))
+                raise
 
 class Check(VisitorBase):
+    """A visitor that checks a parsed ASDL tree for correctness.
 
+    Errors are printed and accumulated.
+    """
     def __init__(self):
-        super(Check, self).__init__(skip=True)
+        super(Check, self).__init__()
         self.cons = {}
         self.errors = 0
         self.types = {}
@@ -342,8 +159,8 @@
         if conflict is None:
             self.cons[key] = name
         else:
-            print "Redefinition of constructor %s" % key
-            print "Defined in %s and %s" % (conflict, name)
+            print('Redefinition of constructor {}'.format(key))
+            print('Defined in {} and {}'.format(conflict, name))
             self.errors += 1
         for f in cons.fields:
             self.visit(f, key)
@@ -358,6 +175,11 @@
             self.visit(f, name)
 
 def check(mod):
+    """Check the parsed ASDL tree for correctness.
+
+    Return True if success. For failure, the errors are printed out and False
+    is returned.
+    """
     v = Check()
     v.visit(mod)
 
@@ -365,40 +187,190 @@
         if t not in mod.types and not t in builtin_types:
             v.errors += 1
             uses = ", ".join(v.types[t])
-            print "Undefined type %s, used in %s" % (t, uses)
-
+            print('Undefined type {}, used in {}'.format(t, uses))
     return not v.errors
 
-def parse(file):
-    scanner = ASDLScanner()
-    parser = ASDLParser()
+# The ASDL parser itself comes next. The only interesting external interface
+# here is the top-level parse function.
 
-    buf = open(file).read()
-    tokens = scanner.tokenize(buf)
-    try:
-        return parser.parse(tokens)
-    except ASDLSyntaxError as err:
-        print err
-        lines = buf.split("\n")
-        print lines[err.lineno - 1] # lines starts at 0, files at 1
+def parse(filename):
+    """Parse ASDL from the given file and return a Module node describing 
it."""
+    with open(filename) as f:
+        parser = ASDLParser()
+        return parser.parse(f.read())
 
-if __name__ == "__main__":
-    import glob
-    import sys
+# Types for describing tokens in an ASDL specification.
+class TokenKind:
+    """TokenKind is provides a scope for enumerated token kinds."""
+    (ConstructorId, TypeId, Equals, Comma, Question, Pipe, Asterisk,
+     LParen, RParen, LBrace, RBrace) = range(11)
 
-    if len(sys.argv) > 1:
-        files = sys.argv[1:]
-    else:
-        testdir = "tests"
-        files = glob.glob(testdir + "/*.asdl")
+    operator_table = {
+        '=': Equals, ',': Comma,    '?': Question, '|': Pipe,    '(': LParen,
+        ')': RParen, '*': Asterisk, '{': LBrace,   '}': RBrace}
 
-    for file in files:
-        print file
-        mod = parse(file)
-        print "module", mod.name
-        print len(mod.dfns), "definitions"
-        if not check(mod):
-            print "Check failed"
+Token = namedtuple('Token', 'kind value lineno')
+
+class ASDLSyntaxError(Exception):
+    def __init__(self, msg, lineno=None):
+        self.msg = msg
+        self.lineno = lineno or '<unknown>'
+
+    def __str__(self):
+        return 'Syntax error on line {0.lineno}: {0.msg}'.format(self)
+
+def tokenize_asdl(buf):
+    """Tokenize the given buffer. Yield Token objects."""
+    for lineno, line in enumerate(buf.splitlines(), 1):
+        for m in re.finditer(r'\s*(\w+|--.*|.)', line.strip()):
+            c = m.group(1)
+            if c[0].isalpha():
+                # Some kind of identifier
+                if c[0].isupper():
+                    yield Token(TokenKind.ConstructorId, c, lineno)
+                else:
+                    yield Token(TokenKind.TypeId, c, lineno)
+            elif c[:2] == '--':
+                # Comment
+                break
+            else:
+                # Operators
+                try:
+                    op_kind = TokenKind.operator_table[c]
+                except KeyError:
+                    raise ASDLSyntaxError('Invalid operator %s' % c, lineno)
+                yield Token(op_kind, c, lineno)
+
+class ASDLParser:
+    """Parser for ASDL files.
+
+    Create, then call the parse method on a buffer containing ASDL.
+    This is a simple recursive descent parser that uses tokenize_asdl for the
+    lexing.
+    """
+    def __init__(self):
+        self._tokenizer = None
+        self.cur_token = None
+
+    def parse(self, buf):
+        """Parse the ASDL in the buffer and return an AST with a Module root.
+        """
+        self._tokenizer = tokenize_asdl(buf)
+        self._advance()
+        return self._parse_module()
+
+    def _parse_module(self):
+        if self._at_keyword('module'):
+            self._advance()
         else:
-            for dfn in mod.dfns:
-                print dfn.type
+            raise ASDLSyntaxError(
+                'Expected "module" (found {})'.format(self.cur_token.value),
+                self.cur_token.lineno)
+        name = self._match(self._id_kinds)
+        self._match(TokenKind.LBrace)
+        defs = self._parse_definitions()
+        self._match(TokenKind.RBrace)
+        return Module(name, defs)
+
+    def _parse_definitions(self):
+        defs = []
+        while self.cur_token.kind == TokenKind.TypeId:
+            typename = self._advance()
+            self._match(TokenKind.Equals)
+            type = self._parse_type()
+            defs.append(Type(typename, type))
+        return defs
+
+    def _parse_type(self):
+        if self.cur_token.kind == TokenKind.LParen:
+            # If we see a (, it's a product
+            return self._parse_product()
+        else:
+            # Otherwise it's a sum. Look for ConstructorId
+            sumlist = [Constructor(self._match(TokenKind.ConstructorId),
+                                   self._parse_optional_fields())]
+            while self.cur_token.kind  == TokenKind.Pipe:
+                # More constructors
+                self._advance()
+                sumlist.append(Constructor(
+                                self._match(TokenKind.ConstructorId),
+                                self._parse_optional_fields()))
+            return Sum(sumlist, self._parse_optional_attributes())
+
+    def _parse_product(self):
+        return Product(self._parse_fields(), self._parse_optional_attributes())
+
+    def _parse_fields(self):
+        fields = []
+        self._match(TokenKind.LParen)
+        while self.cur_token.kind == TokenKind.TypeId:
+            typename = self._advance()
+            is_seq, is_opt = self._parse_optional_field_quantifier()
+            id = (self._advance() if self.cur_token.kind in self._id_kinds
+                                  else None)
+            fields.append(Field(typename, id, seq=is_seq, opt=is_opt))
+            if self.cur_token.kind == TokenKind.RParen:
+                break
+            elif self.cur_token.kind == TokenKind.Comma:
+                self._advance()
+        self._match(TokenKind.RParen)
+        return fields
+
+    def _parse_optional_fields(self):
+        if self.cur_token.kind == TokenKind.LParen:
+            return self._parse_fields()
+        else:
+            return None
+
+    def _parse_optional_attributes(self):
+        if self._at_keyword('attributes'):
+            self._advance()
+            return self._parse_fields()
+        else:
+            return None
+
+    def _parse_optional_field_quantifier(self):
+        is_seq, is_opt = False, False
+        if self.cur_token.kind == TokenKind.Asterisk:
+            is_seq = True
+            self._advance()
+        elif self.cur_token.kind == TokenKind.Question:
+            is_opt = True
+            self._advance()
+        return is_seq, is_opt
+
+    def _advance(self):
+        """ Return the value of the current token and read the next one into
+            self.cur_token.
+        """
+        cur_val = None if self.cur_token is None else self.cur_token.value
+        try:
+            self.cur_token = next(self._tokenizer)
+        except StopIteration:
+            self.cur_token = None
+        return cur_val
+
+    _id_kinds = (TokenKind.ConstructorId, TokenKind.TypeId)
+
+    def _match(self, kind):
+        """The 'match' primitive of RD parsers.
+
+        * Verifies that the current token is of the given kind (kind can
+          be a tuple, in which the kind must match one of its members).
+        * Returns the value of the current token
+        * Reads in the next token
+        """
+        if (isinstance(kind, tuple) and self.cur_token.kind in kind or
+            self.cur_token.kind == kind
+            ):
+            value = self.cur_token.value
+            self._advance()
+            return value
+        else:
+            raise ASDLSyntaxError(
+                'Unmatched {} (found {})'.format(kind, self.cur_token.kind),
+                self.cur_token.lineno)
+
+    def _at_keyword(self, keyword):
+        return (self.cur_token.kind == TokenKind.TypeId and
+                self.cur_token.value == keyword)
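
Taken together, the rewritten module's external surface is what __all__
advertises: parse() a file into a Module, check() the result, and a
tokenizer that is usable on its own. A short usage sketch against the code
above (the file path is an assumption):

    import asdl

    mod = asdl.parse('Python.asdl')        # Module node
    print(mod.name)                        # Python
    if not asdl.check(mod):                # prints any errors, returns False
        raise SystemExit(1)

    # The generator-based tokenizer yields Token(kind, value, lineno) tuples;
    # kind is one of the TokenKind integers.
    for tok in asdl.tokenize_asdl('stmt = Pass | Break'):
        print(tok.kind, tok.value, tok.lineno)
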
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -85,7 +85,7 @@
             self.emit("class %s(AST):" % (base,))
             if sum.attributes:
                 self.emit("")
-                args = ", ".join(attr.name.value for attr in sum.attributes)
+                args = ", ".join(attr.name for attr in sum.attributes)
                 self.emit("def __init__(self, %s):" % (args,), 1)
                 for attr in sum.attributes:
                     self.visit(attr)
@@ -101,8 +101,8 @@
                           % (typ.name,), 3)
             self.emit("raise oefmt(space.w_TypeError,", 2)
             self.emit("        \"Expected %s node, got %%T\", w_node)" % 
(base,), 2)
-            self.emit("State.ast_type('%r', 'AST', None, %s)" %
-                      (base, [repr(attr.name) for attr in sum.attributes]))
+            self.emit("State.ast_type(%r, 'AST', None, %s)" %
+                      (base, [attr.name for attr in sum.attributes]))
             self.emit("")
             for cons in sum.types:
                 self.visit(cons, base, sum.attributes)
@@ -118,37 +118,37 @@
         self.emit("visitor.visit_%s(self)" % (name,), 2)
         self.emit("")
         self.make_converters(product.fields, name)
-        self.emit("State.ast_type('%r', 'AST', %s)" %
-                  (name, [repr(f.name) for f in product.fields]))
+        self.emit("State.ast_type(%r, 'AST', %s)" %
+                  (name, [f.name for f in product.fields]))
         self.emit("")
 
     def get_value_converter(self, field, value):
-        if field.type.value in self.data.simple_types:
+        if field.type in self.data.simple_types:
             return "%s_to_class[%s - 1]().to_object(space)" % (field.type, 
value)
-        elif field.type.value in ("object", "string"):
+        elif field.type in ("object", "singleton", "string", "bytes"):
             return value
-        elif field.type.value in ("identifier", "int", "bool"):
+        elif field.type in ("identifier", "int", "bool"):
             return "space.wrap(%s)" % (value,)
         else:
             wrapper = "%s.to_object(space)" % (value,)
             if field.opt:
                 wrapper += " if %s is not None else space.w_None" % (value,)
             return wrapper
-        
+
     def get_value_extractor(self, field, value):
-        if field.type.value in self.data.simple_types:
+        if field.type in self.data.simple_types:
             return "%s.from_object(space, %s)" % (field.type, value)
-        elif field.type.value in ("object",):
+        elif field.type in ("object","singleton"):
             return value
-        elif field.type.value in ("string",):
+        elif field.type in ("string","bytes"):
             return "check_string(space, %s)" % (value,)
-        elif field.type.value in ("identifier",):
+        elif field.type in ("identifier",):
             if field.opt:
                 return "space.str_or_None_w(%s)" % (value,)
             return "space.realstr_w(%s)" % (value,)
-        elif field.type.value in ("int",):
+        elif field.type in ("int",):
             return "space.int_w(%s)" % (value,)
-        elif field.type.value in ("bool",):
+        elif field.type in ("bool",):
             return "space.bool_w(%s)" % (value,)
         else:
             return "%s.from_object(space, %s)" % (field.type, value)
@@ -179,6 +179,11 @@
         else:
             value = self.get_value_extractor(field, "w_%s" % (field.name,))
             lines = ["_%s = %s" % (field.name, value)]
+            if not field.opt and field.type not in ("int",):
+                lines.append("if _%s is None:" % (field.name,))
+                lines.append("    raise_required_value(space, w_node, '%s')"
+                             % (field.name,))
+
         return lines
 
     def make_converters(self, fields, name, extras=None):
@@ -216,12 +221,12 @@
             if extras:
                 base_args = ", ".join(str(field.name) for field in extras)
                 self.emit("%s.__init__(self, %s)" % (base, base_args), 2)
-    
+
     def make_mutate_over(self, cons, name):
         self.emit("def mutate_over(self, visitor):", 1)
         for field in cons.fields:
-            if (field.type.value not in asdl.builtin_types and
-                field.type.value not in self.data.simple_types):
+            if (field.type not in asdl.builtin_types and
+                field.type not in self.data.simple_types):
                 if field.opt or field.seq:
                     level = 3
                     self.emit("if self.%s:" % (field.name,), 2)
@@ -247,8 +252,8 @@
         self.emit("")
         self.make_mutate_over(cons, cons.name)
         self.make_converters(cons.fields, cons.name, extra_attributes)
-        self.emit("State.ast_type('%r', '%s', %s)" % 
-                  (cons.name, base, [repr(f.name) for f in cons.fields]))
+        self.emit("State.ast_type(%r, '%s', %s)" %
+                  (cons.name, base, [f.name for f in cons.fields]))
         self.emit("")
 
     def visitField(self, field):
@@ -320,8 +325,8 @@
         self.emit("")
 
     def visitField(self, field):
-        if (field.type.value not in asdl.builtin_types and 
-            field.type.value not in self.data.simple_types):
+        if (field.type not in asdl.builtin_types and
+            field.type not in self.data.simple_types):
             level = 2
             template = "node.%s.walkabout(self)"
             if field.seq:
@@ -362,7 +367,7 @@
             if isinstance(tp.value, asdl.Sum):
                 sum = tp.value
                 if is_simple_sum(sum):
-                    simple_types.add(tp.name.value)
+                    simple_types.add(tp.name)
                 else:
                     attrs = [field for field in sum.attributes]
                     for cons in sum.types:
@@ -370,7 +375,7 @@
                         cons_attributes[cons] = attrs
             else:
                 prod = tp.value
-                prod_simple.add(tp.name.value)
+                prod_simple.add(tp.name)
                 add_masks(prod.fields, prod)
         prod_simple.update(simple_types)
         self.cons_attributes = cons_attributes
@@ -391,10 +396,9 @@
 from pypy.interpreter.gateway import interp2app
 
 
-def raise_attriberr(space, w_obj, name):
-    raise oefmt(space.w_AttributeError,
-                "'%T' object has no attribute '%s'", w_obj, name)
-
+def raise_required_value(space, w_obj, name):
+    raise oefmt(space.w_ValueError,
+                "field %s is required for %T", name, w_obj)
 
 def check_string(space, w_obj):
     if not (space.isinstance_w(w_obj, space.w_str) or
@@ -522,7 +526,7 @@
         self.w_AST = space.gettypeobject(W_AST.typedef)
         for (name, base, fields, attributes) in self.AST_TYPES:
             self.make_new_type(space, name, base, fields, attributes)
-        
+
     def make_new_type(self, space, name, base, fields, attributes):
         w_base = getattr(self, 'w_%s' % base)
         w_dict = space.newdict()
@@ -534,7 +538,7 @@
             space.setitem_str(w_dict, "_attributes",
                               space.newtuple([space.wrap(a) for a in attributes]))
         w_type = space.call_function(
-            space.w_type, 
+            space.w_type,
             space.wrap(name), space.newtuple([w_base]), w_dict)
         setattr(self, 'w_%s' % name, w_type)
 
diff --git a/pypy/interpreter/astcompiler/tools/spark.py b/pypy/interpreter/astcompiler/tools/spark.py
deleted file mode 100644
--- a/pypy/interpreter/astcompiler/tools/spark.py
+++ /dev/null
@@ -1,839 +0,0 @@
-#  Copyright (c) 1998-2002 John Aycock
-#
-#  Permission is hereby granted, free of charge, to any person obtaining
-#  a copy of this software and associated documentation files (the
-#  "Software"), to deal in the Software without restriction, including
-#  without limitation the rights to use, copy, modify, merge, publish,
-#  distribute, sublicense, and/or sell copies of the Software, and to
-#  permit persons to whom the Software is furnished to do so, subject to
-#  the following conditions:
-#
-#  The above copyright notice and this permission notice shall be
-#  included in all copies or substantial portions of the Software.
-#
-#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-#  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-#  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-#  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-#  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-#  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-__version__ = 'SPARK-0.7 (pre-alpha-5)'
-
-import re
-import string
-
-def _namelist(instance):
-    namelist, namedict, classlist = [], {}, [instance.__class__]
-    for c in classlist:
-        for b in c.__bases__:
-            classlist.append(b)
-        for name in c.__dict__.keys():
-            if not namedict.has_key(name):
-                namelist.append(name)
-                namedict[name] = 1
-    return namelist
-
-class GenericScanner:
-    def __init__(self, flags=0):
-        pattern = self.reflect()
-        self.re = re.compile(pattern, re.VERBOSE|flags)
-
-        self.index2func = {}
-        for name, number in self.re.groupindex.items():
-            self.index2func[number-1] = getattr(self, 't_' + name)
-
-    def makeRE(self, name):
-        doc = getattr(self, name).__doc__
-        rv = '(?P<%s>%s)' % (name[2:], doc)
-        return rv
-
-    def reflect(self):
-        rv = []
-        for name in _namelist(self):
-            if name[:2] == 't_' and name != 't_default':
-                rv.append(self.makeRE(name))
-
-        rv.append(self.makeRE('t_default'))
-        return string.join(rv, '|')
-
-    def error(self, s, pos):
-        print "Lexical error at position %s" % pos
-        raise SystemExit
-
-    def tokenize(self, s):
-        pos = 0
-        n = len(s)
-        while pos < n:
-            m = self.re.match(s, pos)
-            if m is None:
-                self.error(s, pos)
-
-            groups = m.groups()
-            for i in range(len(groups)):
-                if groups[i] and self.index2func.has_key(i):
-                    self.index2func[i](groups[i])
-            pos = m.end()
-
-    def t_default(self, s):
-        r'( . | \n )+'
-        print "Specification error: unmatched input"
-        raise SystemExit
-
-#
-#  Extracted from GenericParser and made global so that [un]picking works.
-#
-class _State:
-    def __init__(self, stateno, items):
-        self.T, self.complete, self.items = [], [], items
-        self.stateno = stateno
-
-class GenericParser:
-    #
-    #  An Earley parser, as per J. Earley, "An Efficient Context-Free
-    #  Parsing Algorithm", CACM 13(2), pp. 94-102.  Also J. C. Earley,
-    #  "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
-    #  Carnegie-Mellon University, August 1968.  New formulation of
-    #  the parser according to J. Aycock, "Practical Earley Parsing
-    #  and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
-    #  2001, and J. Aycock and R. N. Horspool, "Practical Earley
-    #  Parsing", unpublished paper, 2001.
-    #
-
-    def __init__(self, start):
-        self.rules = {}
-        self.rule2func = {}
-        self.rule2name = {}
-        self.collectRules()
-        self.augment(start)
-        self.ruleschanged = 1
-
-    _NULLABLE = '\e_'
-    _START = 'START'
-    _BOF = '|-'
-
-    #
-    #  When pickling, take the time to generate the full state machine;
-    #  some information is then extraneous, too.  Unfortunately we
-    #  can't save the rule2func map.
-    #
-    def __getstate__(self):
-        if self.ruleschanged:
-            #
-            #  XXX - duplicated from parse()
-            #
-            self.computeNull()
-            self.newrules = {}
-            self.new2old = {}
-            self.makeNewRules()
-            self.ruleschanged = 0
-            self.edges, self.cores = {}, {}
-            self.states = { 0: self.makeState0() }
-            self.makeState(0, self._BOF)
-        #
-        #  XXX - should find a better way to do this..
-        #
-        changes = 1
-        while changes:
-            changes = 0
-            for k, v in self.edges.items():
-                if v is None:
-                    state, sym = k
-                    if self.states.has_key(state):
-                        self.goto(state, sym)
-                        changes = 1
-        rv = self.__dict__.copy()
-        for s in self.states.values():
-            del s.items
-        del rv['rule2func']
-        del rv['nullable']
-        del rv['cores']
-        return rv
-
-    def __setstate__(self, D):
-        self.rules = {}
-        self.rule2func = {}
-        self.rule2name = {}
-        self.collectRules()
-        start = D['rules'][self._START][0][1][1]        # Blech.
-        self.augment(start)
-        D['rule2func'] = self.rule2func
-        D['makeSet'] = self.makeSet_fast
-        self.__dict__ = D
-
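The pickle support deserves a note: __getstate__ forces the whole state
machine to be generated before dumping, and __setstate__ rebinds makeSet to
the inlined makeSet_fast, so an unpickled parser skips table construction
entirely.  A hypothetical warm-up helper built on that behavior (illustrative
only, not from the diff):

    import pickle

    def warmed_copy(parser):
        # 'parser' is any GenericParser subclass instance.  dumps()
        # builds the full table; the loaded copy dispatches every
        # parse to makeSet_fast.
        return pickle.loads(pickle.dumps(parser))
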
-    #
-    #  A hook for GenericASTBuilder and GenericASTMatcher.  Mess
-    #  thee not with this; nor shall thee toucheth the _preprocess
-    #  argument to addRule.
-    #
-    def preprocess(self, rule, func):       return rule, func
-
-    def addRule(self, doc, func, _preprocess=1):
-        fn = func
-        rules = string.split(doc)
-
-        index = []
-        for i in range(len(rules)):
-            if rules[i] == '::=':
-                index.append(i-1)
-        index.append(len(rules))
-
-        for i in range(len(index)-1):
-            lhs = rules[index[i]]
-            rhs = rules[index[i]+2:index[i+1]]
-            rule = (lhs, tuple(rhs))
-
-            if _preprocess:
-                rule, fn = self.preprocess(rule, func)
-
-            if self.rules.has_key(lhs):
-                self.rules[lhs].append(rule)
-            else:
-                self.rules[lhs] = [ rule ]
-            self.rule2func[rule] = fn
-            self.rule2name[rule] = func.__name__[2:]
-        self.ruleschanged = 1
-
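addRule() accepts several productions in one docstring: the doc is split on
whitespace, every '::=' marks a production whose left-hand side is the word
just before it, and the right-hand side runs up to the next left-hand side.
The slicing, replayed standalone (illustrative, not the diff's code):

    doc = '''
        expr ::= expr + term
        expr ::= term
    '''
    words = doc.split()
    # index of the word preceding each '::=' marker, plus a sentinel
    starts = [i - 1 for i, w in enumerate(words) if w == '::='] + [len(words)]
    rules = [(words[s], tuple(words[s + 2:starts[j + 1]]))
             for j, s in enumerate(starts[:-1])]
    print(rules)  # [('expr', ('expr', '+', 'term')), ('expr', ('term',))]
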
-    def collectRules(self):
-        for name in _namelist(self):
-            if name[:2] == 'p_':
-                func = getattr(self, name)
-                doc = func.__doc__
-                self.addRule(doc, func)
-
-    def augment(self, start):
-        rule = '%s ::= %s %s' % (self._START, self._BOF, start)
-        self.addRule(rule, lambda args: args[1], 0)
-
-    def computeNull(self):
-        self.nullable = {}
-        tbd = []
-
-        for rulelist in self.rules.values():
-            lhs = rulelist[0][0]
-            self.nullable[lhs] = 0
-            for rule in rulelist:
-                rhs = rule[1]
-                if len(rhs) == 0:
-                    self.nullable[lhs] = 1
-                    continue
-                #
-                #  We only need to consider rules which
-                #  consist entirely of nonterminal symbols.
-                #  This should be a savings on typical
-                #  grammars.
-                #
-                for sym in rhs:
-                    if not self.rules.has_key(sym):
-                        break
-                else:
-                    tbd.append(rule)
-        changes = 1
-        while changes:
-            changes = 0
-            for lhs, rhs in tbd:
-                if self.nullable[lhs]:
-                    continue
-                for sym in rhs:
-                    if not self.nullable[sym]:
-                        break
-                else:
-                    self.nullable[lhs] = 1
-                    changes = 1
-
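computeNull() is the usual fixed-point nullability computation: a nonterminal
is nullable if some production for it is empty or consists entirely of
nullable symbols, iterated until nothing changes.  The same computation on a
toy grammar (hypothetical example):

    rules = {
        'S': [('A', 'A')],          # S derives only via A A
        'A': [('a',), ()],          # A has an explicit empty production
    }
    nullable = {lhs: any(len(rhs) == 0 for rhs in rhss)
                for lhs, rhss in rules.items()}
    changed = True
    while changed:
        changed = False
        for lhs, rhss in rules.items():
            if not nullable[lhs] and any(
                    all(nullable.get(sym, False) for sym in rhs)
                    for rhs in rhss):
                nullable[lhs] = True
                changed = True
    print(nullable)                 # {'S': True, 'A': True}
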
-    def makeState0(self):
-        s0 = _State(0, [])
-        for rule in self.newrules[self._START]:
-            s0.items.append((rule, 0))
-        return s0
-
-    def finalState(self, tokens):
-        #
-        #  Yuck.
-        #
-        if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
-            return 1
-        start = self.rules[self._START][0][1][1]
-        return self.goto(1, start)
-
-    def makeNewRules(self):
-        worklist = []
-        for rulelist in self.rules.values():
-            for rule in rulelist:
-                worklist.append((rule, 0, 1, rule))
-
-        for rule, i, candidate, oldrule in worklist:
-            lhs, rhs = rule
-            n = len(rhs)
-            while i < n:
-                sym = rhs[i]
-                if not self.rules.has_key(sym) or \
-                   not self.nullable[sym]:
-                    candidate = 0
-                    i = i + 1
-                    continue
-
-                newrhs = list(rhs)
-                newrhs[i] = self._NULLABLE+sym
-                newrule = (lhs, tuple(newrhs))
-                worklist.append((newrule, i+1,
-                                 candidate, oldrule))
-                candidate = 0
-                i = i + 1
-            else:
-                if candidate:
-                    lhs = self._NULLABLE+lhs
-                    rule = (lhs, rhs)
-                if self.newrules.has_key(lhs):
-                    self.newrules[lhs].append(rule)
-                else:
-                    self.newrules[lhs] = [ rule ]
-                self.new2old[rule] = oldrule
-
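makeNewRules() implements the epsilon elimination from the Aycock/Horspool
formulation: every occurrence of a nullable nonterminal spawns a rule variant
in which that occurrence carries the \e_ prefix, so an Earley item may skip
over it, and a variant whose symbols are all tagged gets its left-hand side
tagged as well.  A one-tag-at-a-time simplification (the real code compounds
tags via the worklist and relabels fully nullable variants):

    NULLABLE = '\\e_'

    def variants(rule, nullable):
        # Yield the rule plus one copy per nullable occurrence, with
        # that occurrence marked skippable (illustrative only).
        lhs, rhs = rule
        yield rule
        for i, sym in enumerate(rhs):
            if nullable.get(sym):
                tagged = list(rhs)
                tagged[i] = NULLABLE + sym
                yield (lhs, tuple(tagged))

    print(list(variants(('S', ('A', 'b')), {'A': True})))
    # [('S', ('A', 'b')), ('S', ('\\e_A', 'b'))]
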
-    def typestring(self, token):
-        return None
-
-    def error(self, token):
-        print "Syntax error at or near `%s' token" % token
-        raise SystemExit
-
-    def parse(self, tokens):
-        sets = [ [(1,0), (2,0)] ]
-        self.links = {}
-
-        if self.ruleschanged:
-            self.computeNull()
-            self.newrules = {}
-            self.new2old = {}
-            self.makeNewRules()
-            self.ruleschanged = 0
-            self.edges, self.cores = {}, {}
-            self.states = { 0: self.makeState0() }
-            self.makeState(0, self._BOF)
-
-        for i in xrange(len(tokens)):
-            sets.append([])
-
-            if sets[i] == []:
-                break
-            self.makeSet(tokens[i], sets, i)
-        else:
-            sets.append([])
-            self.makeSet(None, sets, len(tokens))
-
-        #_dump(tokens, sets, self.states)
-
-        finalitem = (self.finalState(tokens), 0)
-        if finalitem not in sets[-2]:
-            if len(tokens) > 0:
-                self.error(tokens[i-1])
-            else:
-                self.error(None)
-
-        return self.buildTree(self._START, finalitem,
-                              tokens, len(sets)-2)
-
-    def isnullable(self, sym):
-        #
-        #  For symbols in G_e only.  If we weren't supporting 1.5,
-        #  could just use sym.startswith().
-        #
-        return self._NULLABLE == sym[0:len(self._NULLABLE)]
-
-    def skip(self, (lhs, rhs), pos=0):
-        n = len(rhs)
-        while pos < n:
-            if not self.isnullable(rhs[pos]):
-                break
-            pos = pos + 1
-        return pos
-
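isnullable() and skip() read the tag back: skip() advances an item's dot past
any \e_-marked symbols.  (The Python 1.5 remark above is why isnullable()
slices instead of calling startswith().)  A standalone equivalent
(illustrative):

    NULLABLE = '\\e_'

    def skip(rhs, pos=0):
        # Move the dot past symbols marked as deriving epsilon.
        while pos < len(rhs) and rhs[pos].startswith(NULLABLE):
            pos += 1
        return pos

    print(skip(('\\e_A', 'b')))     # 1, the dot starts past \e_A
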
-    def makeState(self, state, sym):
-        assert sym is not None
-        #
-        #  Compute \epsilon-kernel state's core and see if
-        #  it exists already.
-        #
-        kitems = []
-        for rule, pos in self.states[state].items:
-            lhs, rhs = rule
-            if rhs[pos:pos+1] == (sym,):
-                kitems.append((rule, self.skip(rule, pos+1)))
-        core = kitems
-
-        core.sort()
-        tcore = tuple(core)
-        if self.cores.has_key(tcore):
-            return self.cores[tcore]
-        #
-        #  Nope, doesn't exist.  Compute it and the associated
-        #  \epsilon-nonkernel state together; we'll need it right away.
-        #
-        k = self.cores[tcore] = len(self.states)
-        K, NK = _State(k, kitems), _State(k+1, [])
-        self.states[k] = K
-        predicted = {}
-
-        edges = self.edges
-        rules = self.newrules
-        for X in K, NK:
-            worklist = X.items
-            for item in worklist:
-                rule, pos = item
-                lhs, rhs = rule
-                if pos == len(rhs):
-                    X.complete.append(rule)
-                    continue
-
-                nextSym = rhs[pos]
-                key = (X.stateno, nextSym)
-                if not rules.has_key(nextSym):
-                    if not edges.has_key(key):
-                        edges[key] = None
-                        X.T.append(nextSym)
-                else:
-                    edges[key] = None
-                    if not predicted.has_key(nextSym):
-                        predicted[nextSym] = 1
-                        for prule in rules[nextSym]:
-                            ppos = self.skip(prule)
-                            new = (prule, ppos)
-                            NK.items.append(new)
-            #
-            #  Problem: we know K needs generating, but we
-            #  don't yet know about NK.  Can't commit anything
-            #  regarding NK to self.edges until we're sure.  Should
-            #  we delay committing on both K and NK to avoid this
-            #  hacky code?  This creates other problems..
-            #
-            if X is K:
-                edges = {}
-
-        if NK.items == []:
-            return k
-
-        #
-        #  Check for \epsilon-nonkernel's core.  Unfortunately we
-        #  need to know the entire set of predicted nonterminals
-        #  to do this without accidentally duplicating states.
-        #
-        core = predicted.keys()
-        core.sort()
-        tcore = tuple(core)
-        if self.cores.has_key(tcore):
-            self.edges[(k, None)] = self.cores[tcore]
-            return k
-
-        nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
-        self.edges.update(edges)
-        self.states[nk] = NK
-        return k
-
-    def goto(self, state, sym):
-        key = (state, sym)
-        if not self.edges.has_key(key):
-            #
-            #  No transitions from state on sym.
-            #
-            return None
-
-        rv = self.edges[key]
-        if rv is None:
-            #
-            #  Target state isn't generated yet.  Remedy this.
-            #
-            rv = self.makeState(state, sym)
-            self.edges[key] = rv
-        return rv
-
-    def gotoT(self, state, t):
-        return [self.goto(state, t)]
-
-    def gotoST(self, state, st):
-        rv = []
-        for t in self.states[state].T:
-            if st == t:
-                rv.append(self.goto(state, t))
-        return rv
-
-    def add(self, set, item, i=None, predecessor=None, causal=None):
-        if predecessor is None:
-            if item not in set:
-                set.append(item)
-        else:
-            key = (item, i)
-            if item not in set:
-                self.links[key] = []
-                set.append(item)
-            self.links[key].append((predecessor, causal))
-
-    def makeSet(self, token, sets, i):
-        cur, next = sets[i], sets[i+1]
-
-        ttype = token is not None and self.typestring(token) or None
-        if ttype is not None:
-            fn, arg = self.gotoT, ttype
-        else:
-            fn, arg = self.gotoST, token
-
-        for item in cur:
-            ptr = (item, i)
-            state, parent = item
-            add = fn(state, arg)
-            for k in add:
-                if k is not None:
-                    self.add(next, (k, parent), i+1, ptr)
-                    nk = self.goto(k, None)
-                    if nk is not None:
-                        self.add(next, (nk, i+1))
-
-            if parent == i:
-                continue
-
-            for rule in self.states[state].complete:
-                lhs, rhs = rule
-                for pitem in sets[parent]:
-                    pstate, pparent = pitem
-                    k = self.goto(pstate, lhs)
-                    if k is not None:
-                        why = (item, i, rule)
-                        pptr = (pitem, parent)
-                        self.add(cur, (k, pparent),
-                                 i, pptr, why)
-                        nk = self.goto(k, None)
-                        if nk is not None:
-                            self.add(cur, (nk, i))
-
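makeSet() is the heart of the Earley algorithm: for each input position it
scans the current item set, follows terminal transitions into the next set,
and, for every completed rule, advances the items in the originating set,
recording predecessor/causal links for buildTree() to replay.  For contrast,
a compact textbook Earley recognizer over raw items (illustrative only;
GenericParser instead precomputes LR(0)-style states, and epsilon productions
need the \e_ machinery above, which this toy version omits):

    def recognize(grammar, start, tokens):
        # grammar: nonterminal -> list of right-hand-side tuples.
        # An item is (lhs, rhs, dot, origin).
        sets = [set() for _ in range(len(tokens) + 1)]
        sets[0] = {(start, rhs, 0, 0) for rhs in grammar[start]}
        for i in range(len(tokens) + 1):
            worklist = list(sets[i])
            while worklist:
                lhs, rhs, dot, origin = worklist.pop()
                if dot < len(rhs) and rhs[dot] in grammar:
                    # Predictor: expand the expected nonterminal here.
                    for prhs in grammar[rhs[dot]]:
                        new = (rhs[dot], prhs, 0, i)
                        if new not in sets[i]:
                            sets[i].add(new)
                            worklist.append(new)
                elif dot < len(rhs):
                    # Scanner: shift over a matching terminal.
                    if i < len(tokens) and tokens[i] == rhs[dot]:
                        sets[i + 1].add((lhs, rhs, dot + 1, origin))
                else:
                    # Completer: advance items that were waiting on lhs.
                    for plhs, prhs, pdot, porigin in list(sets[origin]):
                        if pdot < len(prhs) and prhs[pdot] == lhs:
                            new = (plhs, prhs, pdot + 1, porigin)
                            if new not in sets[i]:
                                sets[i].add(new)
                                worklist.append(new)
        return any(l == start and d == len(r) and o == 0
                   for l, r, d, o in sets[len(tokens)])

    grammar = {'S': [('S', '+', 'n'), ('n',)]}
    print(recognize(grammar, 'S', ['n', '+', 'n']))   # True
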
-    def makeSet_fast(self, token, sets, i):
-        #
-        #  Call *only* when the entire state machine has been built!
-        #  It relies on self.edges being filled in completely, and
-        #  then duplicates and inlines code to boost speed at the
-        #  cost of extreme ugliness.
-        #
-        cur, next = sets[i], sets[i+1]
-        ttype = token is not None and self.typestring(token) or None
-
-        for item in cur:
-            ptr = (item, i)
-            state, parent = item
-            if ttype is not None:
-                k = self.edges.get((state, ttype), None)
-                if k is not None:
-                    #self.add(next, (k, parent), i+1, ptr)
-                    #INLINED --v
-                    new = (k, parent)
-                    key = (new, i+1)
-                    if new not in next:
-                        self.links[key] = []
-                        next.append(new)
-                    self.links[key].append((ptr, None))
-                    #INLINED --^
-                    #nk = self.goto(k, None)
-                    nk = self.edges.get((k, None), None)
-                    if nk is not None:
-                        #self.add(next, (nk, i+1))
-                        #INLINED --v
-                        new = (nk, i+1)
-                        if new not in next:
-                            next.append(new)
-                        #INLINED --^
-            else:
-                add = self.gotoST(state, token)
-                for k in add:
-                    if k is not None:
-                        self.add(next, (k, parent), i+1, ptr)
-                        #nk = self.goto(k, None)
-                        nk = self.edges.get((k, None), None)
-                        if nk is not None:
-                            self.add(next, (nk, i+1))
-
-            if parent == i:
-                continue
-
-            for rule in self.states[state].complete:
-                lhs, rhs = rule
-                for pitem in sets[parent]:
-                    pstate, pparent = pitem
-                    #k = self.goto(pstate, lhs)
-                    k = self.edges.get((pstate, lhs), None)
-                    if k is not None:
-                        why = (item, i, rule)
-                        pptr = (pitem, parent)
-                        #self.add(cur, (k, pparent),
-                        #        i, pptr, why)
-                        #INLINED --v
-                        new = (k, pparent)
-                        key = (new, i)
-                        if new not in cur:
-                            self.links[key] = []
-                            cur.append(new)
-                        self.links[key].append((pptr, why))
-                        #INLINED --^
-                        #nk = self.goto(k, None)
-                        nk = self.edges.get((k, None), None)
-                        if nk is not None:
-                            #self.add(cur, (nk, i))
-                            #INLINED --v
-                            new = (nk, i)
-                            if new not in cur:
-                                cur.append(new)
-                            #INLINED --^
-
-    def predecessor(self, key, causal):
-        for p, c in self.links[key]:
-            if c == causal:
-                return p
-        assert 0
-
-    def causal(self, key):
-        links = self.links[key]
-        if len(links) == 1:
-            return links[0][1]
-        choices = []
-        rule2cause = {}
-        for p, c in links:
-            rule = c[2]
-            choices.append(rule)
-            rule2cause[rule] = c
-        return rule2cause[self.ambiguity(choices)]
-
-    def deriveEpsilon(self, nt):
-        if len(self.newrules[nt]) > 1:
-            rule = self.ambiguity(self.newrules[nt])
-        else:
-            rule = self.newrules[nt][0]
-        #print rule
-
-        rhs = rule[1]
-        attr = [None] * len(rhs)
-
-        for i in range(len(rhs)-1, -1, -1):
-            attr[i] = self.deriveEpsilon(rhs[i])
-        return self.rule2func[self.new2old[rule]](attr)
-
-    def buildTree(self, nt, item, tokens, k):
-        state, parent = item
-
-        choices = []
-        for rule in self.states[state].complete:
-            if rule[0] == nt:
-                choices.append(rule)
-        rule = choices[0]
-        if len(choices) > 1:
-            rule = self.ambiguity(choices)
-        #print rule
-
-        rhs = rule[1]
-        attr = [None] * len(rhs)
-
-        for i in range(len(rhs)-1, -1, -1):
-            sym = rhs[i]
-            if not self.newrules.has_key(sym):
-                if sym != self._BOF:
-                    attr[i] = tokens[k-1]
-                    key = (item, k)
-                    item, k = self.predecessor(key, None)
-            #elif self.isnullable(sym):
-            elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
-                attr[i] = self.deriveEpsilon(sym)
-            else:
-                key = (item, k)
-                why = self.causal(key)
-                attr[i] = self.buildTree(sym, why[0],
-                                         tokens, why[1])
-                item, k = self.predecessor(key, why)
-        return self.rule2func[self.new2old[rule]](attr)
-
-    def ambiguity(self, rules):
-        #
-        #  XXX - problem here and in collectRules() if the same rule
-        #        appears in >1 method.  Also undefined results if rules
-        #        causing the ambiguity appear in the same method.
-        #
-        sortlist = []
-        name2index = {}
-        for i in range(len(rules)):
-            lhs, rhs = rule = rules[i]
-            name = self.rule2name[self.new2old[rule]]
-            sortlist.append((len(rhs), name))
-            name2index[name] = i
-        sortlist.sort()
-        list = map(lambda (a,b): b, sortlist)
-        return rules[name2index[self.resolve(list)]]
-
-    def resolve(self, list):
-        #
-        #  Resolve ambiguity in favor of the shortest RHS.
-        #  Since we walk the tree from the top down, this
-        #  should effectively resolve in favor of a "shift".
-        #
-        return list[0]
-
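Putting GenericParser to work looks like this (class, token, and rule names
are hypothetical, not part of this diff).  SPARK matches a token against a
terminal name with ==, so a minimal token type compares on its type string;
each p_ method's docstring carries its productions, and the body is the
semantic action, applied to the children innermost-first:

    class Token:
        def __init__(self, type, value):
            self.type, self.value = type, value
        def __eq__(self, other):
            return self.type == other    # match against a terminal name

    class Calc(GenericParser):
        def __init__(self):
            GenericParser.__init__(self, 'expr')

        def p_add(self, args):
            ' expr ::= expr + term '
            return args[0] + args[2]

        def p_promote(self, args):
            ' expr ::= term '
            return args[0]

        def p_term(self, args):
            ' term ::= number '
            return int(args[0].value)

    tokens = [Token('number', '1'), Token('+', '+'), Token('number', '2')]
    print(Calc().parse(tokens))          # 3

Overriding typestring() to return token.type would let makeSet() use the
direct gotoT() edge lookup instead of probing every terminal via gotoST(),
and a genuinely ambiguous grammar can override resolve() to prefer something
other than the shortest right-hand side.
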
-#
-#  GenericASTBuilder automagically constructs a concrete/abstract syntax tree
-#  for a given input.  The extra argument is a class (not an instance!)
-#  which supports the "__setslice__" and "__len__" methods.
-#
-#  XXX - silently overrides any user code in methods.
-#
-
-class GenericASTBuilder(GenericParser):
-    def __init__(self, AST, start):
-        GenericParser.__init__(self, start)
-        self.AST = AST
-
-    def preprocess(self, rule, func):