Hello community, here is the log from the commit of package python-lark-parser for openSUSE:Factory checked in at 2019-09-23 12:08:10 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/python-lark-parser (Old) and /work/SRC/openSUSE:Factory/.python-lark-parser.new.7948 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-lark-parser" Mon Sep 23 12:08:10 2019 rev:5 rq:730144 version:0.7.5 Changes: -------- --- /work/SRC/openSUSE:Factory/python-lark-parser/python-lark-parser.changes 2019-09-02 13:24:38.277320468 +0200 +++ /work/SRC/openSUSE:Factory/.python-lark-parser.new.7948/python-lark-parser.changes 2019-09-23 12:08:14.173897188 +0200 @@ -1,0 +2,10 @@ +Wed Sep 11 13:06:32 UTC 2019 - Tomáš Chvátal <[email protected]> + +- Update to 0.7.5: + * Lark transformers can now visit tokens as well + * Fixed long-standing non-determinism and prioritization bugs in Earley. + * Serialize tool now supports multiple start symbols + * iter_subtrees, find_data and find_pred methods are now included in standalone parser + * Bugfixes for the transformer interface, for the custom lexer, for grammar imports, and many more + +------------------------------------------------------------------- Old: ---- lark-parser-0.7.3.tar.gz New: ---- lark-parser-0.7.5.tar.gz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ python-lark-parser.spec ++++++ --- /var/tmp/diff_new_pack.x3BsGa/_old 2019-09-23 12:08:15.433896980 +0200 +++ /var/tmp/diff_new_pack.x3BsGa/_new 2019-09-23 12:08:15.437896979 +0200 @@ -18,7 +18,7 @@ %{?!python_module:%define python_module() python-%{**} python3-%{**}} Name: python-lark-parser -Version: 0.7.3 +Version: 0.7.5 Release: 0 Summary: A parsing library for Python License: MIT ++++++ lark-parser-0.7.3.tar.gz -> lark-parser-0.7.5.tar.gz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/README.md new/lark-0.7.5/README.md --- old/lark-0.7.3/README.md 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/README.md 2019-09-06 07:18:42.000000000 +0200 @@ -72,7 +72,7 @@  -See more [examples in the wiki](https://github.com/erezsh/lark/wiki/Examples) +See more [examples here](https://github.com/lark-parser/lark/tree/master/examples) @@ -95,7 +95,7 @@ 
- Extensive test suite [](https://codecov.io/gh/erezsh/lark) - And much more! -See the full list of [features in the wiki](https://github.com/erezsh/lark/wiki/Features) +See the full list of [features here](https://lark-parser.readthedocs.io/en/latest/features/) ### Comparison to other libraries diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/__init__.py new/lark-0.7.5/lark/__init__.py --- old/lark-0.7.3/lark/__init__.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/__init__.py 2019-09-06 07:18:42.000000000 +0200 @@ -5,4 +5,4 @@ from .lexer import Token from .lark import Lark -__version__ = "0.7.3" +__version__ = "0.7.5" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/lexer.py new/lark-0.7.5/lark/lexer.py --- old/lark-0.7.3/lark/lexer.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/lexer.py 2019-09-06 07:18:42.000000000 +0200 @@ -101,7 +101,7 @@ self.type = type_ self.pos_in_stream = pos_in_stream - self.value = Str(value) + self.value = value self.line = line self.column = column self.end_line = end_line @@ -268,7 +268,7 @@ return _build_mres(terminals, len(terminals), match_whole) def _regexp_has_newline(r): - """Expressions that may indicate newlines in a regexp: + r"""Expressions that may indicate newlines in a regexp: - newlines (\n) - escaped newline (\\n) - anything but ([^...]) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/load_grammar.py new/lark-0.7.5/lark/load_grammar.py --- old/lark-0.7.3/lark/load_grammar.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/load_grammar.py 2019-09-06 07:18:42.000000000 +0200 @@ -12,7 +12,7 @@ from .parser_frontends import LALR_TraditionalLexer from .common import LexerConf, ParserConf from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol -from .utils import classify, suppress, 
dedup_list +from .utils import classify, suppress, dedup_list, Str from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken from .tree import Tree, SlottedTree as ST @@ -351,7 +351,10 @@ for n in i: w += n if n == '\\': - n2 = next(i) + try: + n2 = next(i) + except StopIteration: + raise ValueError("Literal ended unexpectedly (bad escaping): `%r`" % s) if n2 == '\\': w += '\\\\' elif n2 not in 'uxnftr': @@ -451,9 +454,9 @@ if isinstance(v, Tree): return v elif v.type == 'RULE': - return NonTerminal(v.value) + return NonTerminal(Str(v.value)) elif v.type == 'TERMINAL': - return Terminal(v.value, filter_out=v.startswith('_')) + return Terminal(Str(v.value), filter_out=v.startswith('_')) assert False def _choice_of_rules(rules): @@ -511,12 +514,12 @@ simplify_rule = SimplifyRule_Visitor() compiled_rules = [] - for i, rule_content in enumerate(rules): + for rule_content in rules: name, tree, options = rule_content simplify_rule.visit(tree) expansions = rule_tree_to_text.transform(tree) - for expansion, alias in expansions: + for i, (expansion, alias) in enumerate(expansions): if alias and name.startswith('_'): raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias)) @@ -605,7 +608,9 @@ _, tree, _ = imported_rules[symbol] except KeyError: raise GrammarError("Missing symbol '%s' in grammar %s" % (symbol, namespace)) - return tree.scan_values(lambda x: x.type in ('RULE', 'TERMINAL')) + + return _find_used_symbols(tree) + def get_namespace_name(name): try: @@ -682,6 +687,11 @@ return name +def _find_used_symbols(tree): + assert tree.data == 'expansions' + return {t for x in tree.find_data('expansion') + for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))} + class GrammarLoader: def __init__(self): terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()] @@ -843,9 +853,7 @@ rule_names.add(name) for name, expansions, _o in 
rules: - used_symbols = {t for x in expansions.find_data('expansion') - for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))} - for sym in used_symbols: + for sym in _find_used_symbols(expansions): if sym.type == 'TERMINAL': if sym not in terminal_names: raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name)) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/parser_frontends.py new/lark-0.7.5/lark/parser_frontends.py --- old/lark-0.7.3/lark/parser_frontends.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/parser_frontends.py 2019-09-06 07:18:42.000000000 +0200 @@ -118,7 +118,7 @@ class LALR_CustomLexer(LALR_WithLexer): def __init__(self, lexer_cls, lexer_conf, parser_conf, options=None): - self.lexer = lexer_cls(self.lexer_conf) + self.lexer = lexer_cls(lexer_conf) debug = options.debug if options else False self.parser = LALR_Parser(parser_conf, debug=debug) WithLexer.__init__(self, lexer_conf, parser_conf, options) @@ -139,7 +139,8 @@ self.init_traditional_lexer() resolve_ambiguity = options.ambiguity == 'resolve' - self.parser = earley.Parser(parser_conf, self.match, resolve_ambiguity=resolve_ambiguity) + debug = options.debug if options else False + self.parser = earley.Parser(parser_conf, self.match, resolve_ambiguity=resolve_ambiguity, debug=debug) def match(self, term, token): return term.name == token.type @@ -152,10 +153,12 @@ self._prepare_match(lexer_conf) resolve_ambiguity = options.ambiguity == 'resolve' + debug = options.debug if options else False self.parser = xearley.Parser(parser_conf, self.match, ignore=lexer_conf.ignore, resolve_ambiguity=resolve_ambiguity, + debug=debug, **kw ) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/parsers/earley.py new/lark-0.7.5/lark/parsers/earley.py --- old/lark-0.7.3/lark/parsers/earley.py 2019-08-14 11:31:43.000000000 +0200 +++ 
new/lark-0.7.5/lark/parsers/earley.py 2019-09-06 07:18:42.000000000 +0200 @@ -10,6 +10,7 @@ http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ """ +import logging from collections import deque from ..visitors import Transformer_InPlace, v_args @@ -20,10 +21,11 @@ from .earley_forest import ForestToTreeVisitor, ForestSumVisitor, SymbolNode, ForestToAmbiguousTreeVisitor class Parser: - def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True): + def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=False): analysis = GrammarAnalyzer(parser_conf) self.parser_conf = parser_conf self.resolve_ambiguity = resolve_ambiguity + self.debug = debug self.FIRST = analysis.FIRST self.NULLABLE = analysis.NULLABLE @@ -296,6 +298,15 @@ # symbol should have been completed in the last step of the Earley cycle, and will be in # this column. Find the item for the start_symbol, which is the root of the SPPF tree. solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0] + if self.debug: + from .earley_forest import ForestToPyDotVisitor + try: + debug_walker = ForestToPyDotVisitor() + except ImportError: + logging.warning("Cannot find dependency 'pydot', will not generate sppf debug image") + else: + debug_walker.visit(solutions[0], "sppf.png") + if not solutions: expected_tokens = [t.expect for t in to_scan] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/parsers/earley_forest.py new/lark-0.7.5/lark/parsers/earley_forest.py --- old/lark-0.7.3/lark/parsers/earley_forest.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/parsers/earley_forest.py 2019-09-06 07:18:42.000000000 +0200 @@ -122,7 +122,7 @@ ambiguously. Hence, we use the sort order to identify the order in which ambiguous children should be considered. 
""" - return self.is_empty, -self.priority, -self.rule.order + return self.is_empty, -self.priority, self.rule.order def __iter__(self): return iter([self.left, self.right]) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/parsers/xearley.py new/lark-0.7.5/lark/parsers/xearley.py --- old/lark-0.7.3/lark/parsers/xearley.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/parsers/xearley.py 2019-09-06 07:18:42.000000000 +0200 @@ -24,8 +24,8 @@ class Parser(BaseParser): - def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore = (), complete_lex = False): - BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity) + def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore = (), complete_lex = False, debug=False): + BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity, debug) self.ignore = [Terminal(t) for t in ignore] self.complete_lex = complete_lex diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/tools/serialize.py new/lark-0.7.5/lark/tools/serialize.py --- old/lark-0.7.3/lark/tools/serialize.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/tools/serialize.py 2019-09-06 07:18:42.000000000 +0200 @@ -12,7 +12,7 @@ argparser.add_argument('grammar_file', type=argparse.FileType('r'), help='A valid .lark file') argparser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, help='json file path to create (default=stdout)') -argparser.add_argument('-s', '--start', default='start', help='start symbol (default="start")') +argparser.add_argument('-s', '--start', default='start', help='start symbol (default="start")', nargs='+') argparser.add_argument('-l', '--lexer', default='standard', choices=['standard', 'contextual'], help='lexer type (default="standard")') @@ -33,8 +33,7 @@ argparser.print_help() else: args = argparser.parse_args() - 
- serialize(args.grammar_file, args.out, args.lexer, args.start) + serialize(args.grammar_file, args.out, args.lexer, args.start) if __name__ == '__main__': main() \ No newline at end of file diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/tree.py new/lark-0.7.5/lark/tree.py --- old/lark-0.7.3/lark/tree.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/tree.py 2019-09-06 07:18:42.000000000 +0200 @@ -56,30 +56,6 @@ def __hash__(self): return hash((self.data, tuple(self.children))) -###} - - def expand_kids_by_index(self, *indices): - "Expand (inline) children at the given indices" - for i in sorted(indices, reverse=True): # reverse so that changing tail won't affect indices - kid = self.children[i] - self.children[i:i+1] = kid.children - - def find_pred(self, pred): - "Find all nodes where pred(tree) == True" - return filter(pred, self.iter_subtrees()) - - def find_data(self, data): - "Find all nodes where tree.data == data" - return self.find_pred(lambda t: t.data == data) - - def scan_values(self, pred): - for c in self.children: - if isinstance(c, Tree): - for t in c.scan_values(pred): - yield t - else: - if pred(c): - yield c def iter_subtrees(self): # TODO: Re-write as a more efficient version @@ -102,6 +78,31 @@ yield x seen.add(id(x)) + def find_pred(self, pred): + "Find all nodes where pred(tree) == True" + return filter(pred, self.iter_subtrees()) + + def find_data(self, data): + "Find all nodes where tree.data == data" + return self.find_pred(lambda t: t.data == data) + +###} + + def expand_kids_by_index(self, *indices): + "Expand (inline) children at the given indices" + for i in sorted(indices, reverse=True): # reverse so that changing tail won't affect indices + kid = self.children[i] + self.children[i:i+1] = kid.children + + def scan_values(self, pred): + for c in self.children: + if isinstance(c, Tree): + for t in c.scan_values(pred): + yield t + else: + if pred(c): + yield c + 
def iter_subtrees_topdown(self): stack = [self] while stack: diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/utils.py new/lark-0.7.5/lark/utils.py --- old/lark-0.7.3/lark/utils.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/utils.py 2019-09-06 07:18:42.000000000 +0200 @@ -160,7 +160,7 @@ elif isinstance(f, partial): # wraps does not work for partials in 2.7: https://bugs.python.org/issue3445 - return create_decorator(f.__func__, True) + return wraps(f.func)(create_decorator(lambda *args, **kw: f(*args[1:], **kw), True)) else: return create_decorator(f.__func__.__call__, True) @@ -172,7 +172,7 @@ import sre_constants def get_regexp_width(regexp): try: - return sre_parse.parse(regexp).getwidth() + return [int(x) for x in sre_parse.parse(regexp).getwidth()] except sre_constants.error: raise ValueError(regexp) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/lark/visitors.py new/lark-0.7.5/lark/visitors.py --- old/lark-0.7.3/lark/visitors.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/lark/visitors.py 2019-09-06 07:18:42.000000000 +0200 @@ -3,6 +3,7 @@ from .utils import smart_decorator from .tree import Tree from .exceptions import VisitError, GrammarError +from .lexer import Token ###{standalone from inspect import getmembers, getmro @@ -21,6 +22,10 @@ Can be used to implement map or reduce. 
""" + __visit_tokens__ = False # For backwards compatibility + def __init__(self, visit_tokens=False): + self.__visit_tokens__ = visit_tokens + def _call_userfunc(self, tree, new_children=None): # Assumes tree is already transformed children = new_children if new_children is not None else tree.children @@ -45,10 +50,29 @@ except Exception as e: raise VisitError(tree, e) + def _call_userfunc_token(self, token): + try: + f = getattr(self, token.type) + except AttributeError: + return self.__default_token__(token) + else: + try: + return f(token) + except (GrammarError, Discard): + raise + except Exception as e: + raise VisitError(token, e) + + def _transform_children(self, children): for c in children: try: - yield self._transform_tree(c) if isinstance(c, Tree) else c + if isinstance(c, Tree): + yield self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + yield self._call_userfunc_token(c) + else: + yield c except Discard: pass @@ -66,6 +90,11 @@ "Default operation on tree (for override)" return Tree(data, children, meta) + def __default_token__(self, token): + "Default operation on token (for override)" + return token + + @classmethod def _apply_decorator(cls, decorator, **kwargs): mro = getmro(cls) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/readthedocs.yml new/lark-0.7.5/readthedocs.yml --- old/lark-0.7.3/readthedocs.yml 1970-01-01 01:00:00.000000000 +0100 +++ new/lark-0.7.5/readthedocs.yml 2019-09-06 07:18:42.000000000 +0200 @@ -0,0 +1,10 @@ +version: 2 + +mkdocs: + configuration: mkdocs.yml + fail_on_warning: false + +formats: all + +python: + version: 3.5 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/tests/__main__.py new/lark-0.7.5/tests/__main__.py --- old/lark-0.7.3/tests/__main__.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/tests/__main__.py 2019-09-06 07:18:42.000000000 +0200 @@ -21,6 +21,7 @@ 
TestCykStandard, TestLalrContextual, TestEarleyDynamic, + TestLalrCustom, # TestFullEarleyStandard, TestFullEarleyDynamic, diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/tests/test_parser.py new/lark-0.7.5/tests/test_parser.py --- old/lark-0.7.3/tests/test_parser.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/tests/test_parser.py 2019-09-06 07:18:42.000000000 +0200 @@ -22,7 +22,7 @@ from lark.tree import Tree from lark.visitors import Transformer, Transformer_InPlace, v_args from lark.grammar import Rule -from lark.lexer import TerminalDef +from lark.lexer import TerminalDef, Lexer, TraditionalLexer __path__ = os.path.dirname(__file__) def _read(n, *args): @@ -94,6 +94,24 @@ r = g.parse('xx') self.assertEqual( r.children[0].data, "c" ) + def test_visit_tokens(self): + class T(Transformer): + def a(self, children): + return children[0] + "!" + def A(self, tok): + return tok.upper() + + # Test regular + g = Lark("""start: a + a : A + A: "x" + """, parser='lalr') + r = T().transform(g.parse("x")) + self.assertEqual( r.children, ["x!"] ) + r = T(True).transform(g.parse("x")) + self.assertEqual( r.children, ["X!"] ) + + def test_embedded_transformer(self): class T(Transformer): def a(self, children): @@ -431,12 +449,22 @@ _TestFullEarley.__name__ = _NAME globals()[_NAME] = _TestFullEarley +class CustomLexer(Lexer): + """ + Purpose of this custom lexer is to test the integration, + so it uses the traditionalparser as implementation without custom lexing behaviour. 
+ """ + def __init__(self, lexer_conf): + self.lexer = TraditionalLexer(lexer_conf.tokens, ignore=lexer_conf.ignore, user_callbacks=lexer_conf.callbacks) + def lex(self, *args, **kwargs): + return self.lexer.lex(*args, **kwargs) def _make_parser_test(LEXER, PARSER): + lexer_class_or_name = CustomLexer if LEXER == 'custom' else LEXER def _Lark(grammar, **kwargs): - return Lark(grammar, lexer=LEXER, parser=PARSER, propagate_positions=True, **kwargs) + return Lark(grammar, lexer=lexer_class_or_name, parser=PARSER, propagate_positions=True, **kwargs) def _Lark_open(gfilename, **kwargs): - return Lark.open(gfilename, lexer=LEXER, parser=PARSER, propagate_positions=True, **kwargs) + return Lark.open(gfilename, lexer=lexer_class_or_name, parser=PARSER, propagate_positions=True, **kwargs) class _TestParser(unittest.TestCase): def test_basic1(self): g = _Lark("""start: a+ b a* "b" a* @@ -1532,7 +1560,7 @@ parser = _Lark(grammar) - @unittest.skipIf(PARSER!='lalr', "Serialize currently only works for LALR parsers (though it should be easy to extend)") + @unittest.skipIf(PARSER!='lalr' or LEXER=='custom', "Serialize currently only works for LALR parsers without custom lexers (though it should be easy to extend)") def test_serialize(self): grammar = """ start: _ANY b "C" @@ -1594,6 +1622,7 @@ ('dynamic_complete', 'earley'), ('standard', 'lalr'), ('contextual', 'lalr'), + ('custom', 'lalr'), # (None, 'earley'), ] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/lark-0.7.3/tests/test_trees.py new/lark-0.7.5/tests/test_trees.py --- old/lark-0.7.3/tests/test_trees.py 2019-08-14 11:31:43.000000000 +0200 +++ new/lark-0.7.5/tests/test_trees.py 2019-09-06 07:18:42.000000000 +0200 @@ -4,6 +4,7 @@ from unittest import TestCase import copy import pickle +import functools from lark.tree import Tree from lark.visitors import Transformer, Interpreter, visit_children_decor, v_args, Discard @@ -146,6 +147,22 @@ res = T().transform(t) 
self.assertEqual(res, 2.9) + def test_partial(self): + + tree = Tree("start", [Tree("a", ["test1"]), Tree("b", ["test2"])]) + + def test(prefix, s, postfix): + return prefix + s.upper() + postfix + + @v_args(inline=True) + class T(Transformer): + a = functools.partial(test, "@", postfix="!") + b = functools.partial(lambda s: s + "!") + + res = T().transform(tree) + assert res.children == ["@TEST1!", "test2!"] + + def test_discard(self): class MyTransformer(Transformer): def a(self, args):
