Hello community,

here is the log from the commit of package python-parso for openSUSE:Factory checked in at 2019-12-29 15:49:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-parso (Old)
 and      /work/SRC/openSUSE:Factory/.python-parso.new.6675 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-parso"

Sun Dec 29 15:49:24 2019 rev:10 rq:759718 version:0.5.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-parso/python-parso.changes        2019-07-21 11:28:56.444832074 +0200
+++ /work/SRC/openSUSE:Factory/.python-parso.new.6675/python-parso.changes      2019-12-29 15:49:26.587169274 +0100
@@ -1,0 +2,8 @@
+Fri Dec 27 17:14:25 UTC 2019 - Ondřej Súkup <mimi...@gmail.com>
+
+- update to 0.5.2
+ * Add include_setitem to get_definition/is_definition and get_defined_names
+ * Fix named expression error listing
+ * Fix some f-string tokenizer issues
+
+-------------------------------------------------------------------

Old:
----
  parso-0.5.1.tar.gz

New:
----
  parso-0.5.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-parso.spec ++++++
--- /var/tmp/diff_new_pack.VRSlpL/_old  2019-12-29 15:49:27.911169878 +0100
+++ /var/tmp/diff_new_pack.VRSlpL/_new  2019-12-29 15:49:27.935169889 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-parso
 #
-# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -18,7 +18,7 @@
 
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-parso
-Version:        0.5.1
+Version:        0.5.2
 Release:        0
 Summary:        An autocompletion tool for Python
 License:        MIT AND Python-2.0

++++++ parso-0.5.1.tar.gz -> parso-0.5.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/CHANGELOG.rst new/parso-0.5.2/CHANGELOG.rst
--- old/parso-0.5.1/CHANGELOG.rst       2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/CHANGELOG.rst       2019-12-15 01:01:11.000000000 +0100
@@ -3,6 +3,13 @@
 Changelog
 ---------
 
+0.5.2 (2019-12-15)
+++++++++++++++++++
+
+- Add include_setitem to get_definition/is_definition and get_defined_names (#66)
+- Fix named expression error listing (#89, #90)
+- Fix some f-string tokenizer issues (#93)
+
 0.5.1 (2019-07-13)
 ++++++++++++++++++
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/PKG-INFO new/parso-0.5.2/PKG-INFO
--- old/parso-0.5.1/PKG-INFO    2019-07-13 15:50:20.000000000 +0200
+++ new/parso-0.5.2/PKG-INFO    2019-12-15 01:01:27.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parso
-Version: 0.5.1
+Version: 0.5.2
 Summary: A Python Parser
 Home-page: https://github.com/davidhalter/parso
 Author: David Halter
@@ -106,6 +106,13 @@
         Changelog
         ---------
         
+        0.5.2 (2019-12-15)
+        ++++++++++++++++++
+        
+        - Add include_setitem to get_definition/is_definition and get_defined_names (#66)
+        - Fix named expression error listing (#89, #90)
+        - Fix some f-string tokenizer issues (#93)
+        
         0.5.1 (2019-07-13)
         ++++++++++++++++++
         
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/conftest.py new/parso-0.5.2/conftest.py
--- old/parso-0.5.1/conftest.py 2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/conftest.py 2019-12-15 01:01:11.000000000 +0100
@@ -58,7 +58,9 @@
     elif 'each_py3_version' in metafunc.fixturenames:
         metafunc.parametrize('each_py3_version', VERSIONS_3)
     elif 'version_ge_py36' in metafunc.fixturenames:
-        metafunc.parametrize('version_ge_py36', ['3.6', '3.7'])
+        metafunc.parametrize('version_ge_py36', ['3.6', '3.7', '3.8'])
+    elif 'version_ge_py38' in metafunc.fixturenames:
+        metafunc.parametrize('version_ge_py38', ['3.8'])
 
 
 class NormalizerIssueCase(object):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/__init__.py new/parso-0.5.2/parso/__init__.py
--- old/parso-0.5.1/parso/__init__.py   2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/__init__.py   2019-12-15 01:01:11.000000000 +0100
@@ -43,7 +43,7 @@
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.5.1'
+__version__ = '0.5.2'
 
 
 def parse(code=None, **kwargs):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/pgen2/generator.py new/parso-0.5.2/parso/pgen2/generator.py
--- old/parso-0.5.1/parso/pgen2/generator.py    2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/pgen2/generator.py    2019-12-15 01:01:11.000000000 +0100
@@ -309,13 +309,39 @@
             _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal)
 
     # Now that we have calculated the first terminals, we are sure that
-    # there is no left recursion or ambiguities.
+    # there is no left recursion.
 
     for dfas in nonterminal_to_dfas.values():
         for dfa_state in dfas:
+            transitions = dfa_state.transitions
             for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items():
                 for transition, pushes in first_plans[nonterminal].items():
-                    dfa_state.transitions[transition] = DFAPlan(next_dfa, pushes)
+                    if transition in transitions:
+                        prev_plan = transitions[transition]
+                        # Make sure these are sorted so that error messages are
+                        # at least deterministic
+                        choices = sorted([
+                            (
+                                prev_plan.dfa_pushes[0].from_rule
+                                if prev_plan.dfa_pushes
+                                else prev_plan.next_dfa.from_rule
+                            ),
+                            (
+                                pushes[0].from_rule
+                                if pushes else next_dfa.from_rule
+                            ),
+                        ])
+                        raise ValueError(
+                            "Rule %s is ambiguous; given a %s token, we "
+                            "can't determine if we should evaluate %s or %s."
+                            % (
+                                (
+                                    dfa_state.from_rule,
+                                    transition,
+                                ) + tuple(choices)
+                            )
+                        )
+                    transitions[transition] = DFAPlan(next_dfa, pushes)
 
 
 def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):
@@ -345,13 +371,6 @@
                 raise ValueError("left recursion for rule %r" % nonterminal)
 
         for t, pushes in first_plans2.items():
-            check = new_first_plans.get(t)
-            if check is not None:
-                raise ValueError(
-                    "Rule %s is ambiguous; %s is the"
-                    " start of the rule %s as well as %s."
-                    % (nonterminal, t, nonterminal2, check[-1].from_rule)
-                )
             new_first_plans[t] = [next_] + pushes
 
     first_plans[nonterminal] = new_first_plans
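
Note on the generator.py hunks above: the ambiguity check moved out of
_calculate_first_plans and into the DFA-plan construction, so the error now
names the rule, the triggering token, and both candidate rules (sorted for
deterministic messages). A minimal sketch of triggering it, using only the
entry points exercised by the updated test_pgen2.py further below:

    from parso.pgen2 import generate_grammar
    from parso.python import tokenize

    # 'foo' is ambiguous: on a NAME token the parser cannot decide
    # between 'bar' and 'baz', so grammar generation fails early.
    try:
        generate_grammar('foo: bar | baz\nbar: NAME\nbaz: NAME\n',
                         tokenize.PythonTokenTypes)
    except ValueError as e:
        print(e)  # names rule 'foo', the NAME token, and both choices
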
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/pgen2/grammar_parser.py new/parso-0.5.2/parso/pgen2/grammar_parser.py
--- old/parso-0.5.1/parso/pgen2/grammar_parser.py       2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/pgen2/grammar_parser.py       2019-12-15 01:01:11.000000000 +0100
@@ -141,6 +141,9 @@
         self.next = next_
         self.nonterminal_or_string = nonterminal_or_string
 
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.nonterminal_or_string)
+
 
 class NFAState(object):
     def __init__(self, from_rule):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/python/errors.py new/parso-0.5.2/parso/python/errors.py
--- old/parso-0.5.1/parso/python/errors.py      2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/python/errors.py      2019-12-15 01:01:11.000000000 +0100
@@ -52,7 +52,7 @@
     # It looks like a __future__ import that is relative is still a future
     # import. That feels kind of odd, but whatever.
     # if import_from.level != 0:
-        # return False
+    #     return False
     from_names = import_from.get_from_names()
     return [n.value for n in from_names] == ['__future__']
 
@@ -94,19 +94,32 @@
 
 
 def _iter_definition_exprs_from_lists(exprlist):
-    for child in exprlist.children[::2]:
-        if child.type == 'atom' and child.children[0] in ('(', '['):
-            testlist_comp = child.children[0]
-            if testlist_comp.type == 'testlist_comp':
-                for expr in _iter_definition_exprs_from_lists(testlist_comp):
-                    yield expr
-                continue
+    def check_expr(child):
+        if child.type == 'atom':
+            if child.children[0] == '(':
+                testlist_comp = child.children[1]
+                if testlist_comp.type == 'testlist_comp':
+                    for expr in _iter_definition_exprs_from_lists(testlist_comp):
+                        yield expr
+                    return
+                else:
+                    # It's a paren that doesn't do anything, like 1 + (1)
+                    for c in check_expr(testlist_comp):
+                        yield c
+                    return
             elif child.children[0] == '[':
                 yield testlist_comp
-                continue
-
+                return
         yield child
 
+    if exprlist.type in _STAR_EXPR_PARENTS:
+        for child in exprlist.children[::2]:
+            for c in check_expr(child):  # Python 2 sucks
+                yield c
+    else:
+        for c in check_expr(exprlist):  # Python 2 sucks
+            yield c
+
 
 def _get_expr_stmt_definition_exprs(expr_stmt):
     exprs = []
@@ -120,8 +133,6 @@
 
 def _get_for_stmt_definition_exprs(for_stmt):
     exprlist = for_stmt.children[1]
-    if exprlist.type != 'exprlist':
-        return [exprlist]
     return list(_iter_definition_exprs_from_lists(exprlist))
 
 
@@ -478,38 +489,38 @@
     message = "bytes can only contain ASCII literal characters."
 
     def is_issue(self, leaf):
-            string_prefix = leaf.string_prefix.lower()
-            if 'b' in string_prefix \
-                    and self._normalizer.version >= (3, 0) \
-                    and any(c for c in leaf.value if ord(c) > 127):
-                # b'ä'
-                return True
+        string_prefix = leaf.string_prefix.lower()
+        if 'b' in string_prefix \
+                and self._normalizer.version >= (3, 0) \
+                and any(c for c in leaf.value if ord(c) > 127):
+            # b'ä'
+            return True
 
-            if 'r' not in string_prefix:
-                # Raw strings don't need to be checked if they have proper
-                # escaping.
-                is_bytes = self._normalizer.version < (3, 0)
-                if 'b' in string_prefix:
-                    is_bytes = True
-                if 'u' in string_prefix:
-                    is_bytes = False
-
-                payload = leaf._get_payload()
-                if is_bytes:
-                    payload = payload.encode('utf-8')
-                    func = codecs.escape_decode
-                else:
-                    func = codecs.unicode_escape_decode
+        if 'r' not in string_prefix:
+            # Raw strings don't need to be checked if they have proper
+            # escaping.
+            is_bytes = self._normalizer.version < (3, 0)
+            if 'b' in string_prefix:
+                is_bytes = True
+            if 'u' in string_prefix:
+                is_bytes = False
+
+            payload = leaf._get_payload()
+            if is_bytes:
+                payload = payload.encode('utf-8')
+                func = codecs.escape_decode
+            else:
+                func = codecs.unicode_escape_decode
 
-                try:
-                    with warnings.catch_warnings():
-                        # The warnings from parsing strings are not relevant.
-                        warnings.filterwarnings('ignore')
-                        func(payload)
-                except UnicodeDecodeError as e:
-                    self.add_issue(leaf, message='(unicode error) ' + str(e))
-                except ValueError as e:
-                    self.add_issue(leaf, message='(value error) ' + str(e))
+            try:
+                with warnings.catch_warnings():
+                    # The warnings from parsing strings are not relevant.
+                    warnings.filterwarnings('ignore')
+                    func(payload)
+            except UnicodeDecodeError as e:
+                self.add_issue(leaf, message='(unicode error) ' + str(e))
+            except ValueError as e:
+                self.add_issue(leaf, message='(value error) ' + str(e))
 
 
 @ErrorFinder.register_rule(value='*')
@@ -586,7 +597,7 @@
     message = "trailing comma not allowed without surrounding parentheses"
 
     def is_issue(self, node):
-        if node.children[-1] == ',':
+        if node.children[-1] == ',' and node.parent.children[-1] != ')':
             return True
 
 
@@ -883,7 +894,7 @@
 
 
 class _CheckAssignmentRule(SyntaxRule):
-    def _check_assignment(self, node, is_deletion=False):
+    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False):
         error = None
         type_ = node.type
         if type_ == 'lambdef':
@@ -907,9 +918,9 @@
                         # This is not a comprehension, they were handled
                         # further above.
                         for child in second.children[::2]:
-                            self._check_assignment(child, is_deletion)
+                            self._check_assignment(child, is_deletion, is_namedexpr)
                     else:  # Everything handled, must be useless brackets.
-                        self._check_assignment(second, is_deletion)
+                        self._check_assignment(second, is_deletion, is_namedexpr)
         elif type_ == 'keyword':
             if self._normalizer.version < (3, 8):
                 error = 'keyword'
@@ -939,17 +950,24 @@
                 assert trailer.type == 'trailer'
                 if trailer.children[0] == '(':
                     error = 'function call'
+                elif is_namedexpr and trailer.children[0] == '[':
+                    error = 'subscript'
+                elif is_namedexpr and trailer.children[0] == '.':
+                    error = 'attribute'
         elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
             for child in node.children[::2]:
-                self._check_assignment(child, is_deletion)
+                self._check_assignment(child, is_deletion, is_namedexpr)
         elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
               or '_test' in type_
               or type_ in ('term', 'factor')):
             error = 'operator'
 
         if error is not None:
-            cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
-            message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
+            if is_namedexpr:
+                message = 'cannot use named assignment with %s' % error
+            else:
+                cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
+                message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
             self.add_issue(node, message=message)
 
 
@@ -959,7 +977,6 @@
 
     def is_issue(self, node):
         expr_list = node.children[1]
-        print(expr_list)
         if expr_list.type != 'expr_list':  # Already handled.
             self._check_assignment(expr_list)
 
@@ -1009,3 +1026,71 @@
         expr_list = for_stmt.children[1]
         if expr_list.type != 'expr_list':  # Already handled.
             self._check_assignment(expr_list)
+
+
+@ErrorFinder.register_rule(type='namedexpr_test')
+class _NamedExprRule(_CheckAssignmentRule):
+    # namedexpr_test: test [':=' test]
+
+    def is_issue(self, namedexpr_test):
+        # assigned name
+        first = namedexpr_test.children[0]
+
+        def search_namedexpr_in_comp_for(node):
+            while True:
+                parent = node.parent
+                if parent is None:
+                    return parent
+                if parent.type == 'sync_comp_for' and parent.children[3] == node:
+                    return parent
+                node = parent
+
+        if search_namedexpr_in_comp_for(namedexpr_test):
+            # [i+1 for i in (i := range(5))]
+            # [i+1 for i in (j := range(5))]
+            # [i+1 for i in (lambda: (j := range(5)))()]
+            message = 'assignment expression cannot be used in a comprehension iterable expression'
+            self.add_issue(namedexpr_test, message=message)
+
+        # defined names
+        exprlist = list()
+
+        def process_comp_for(comp_for):
+            if comp_for.type == 'sync_comp_for':
+                comp = comp_for
+            elif comp_for.type == 'comp_for':
+                comp = comp_for.children[1]
+            exprlist.extend(_get_for_stmt_definition_exprs(comp))
+
+        def search_all_comp_ancestors(node):
+            has_ancestors = False
+            while True:
+                node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
+                if node is None:
+                    break
+                for child in node.children:
+                    if child.type in _COMP_FOR_TYPES:
+                        process_comp_for(child)
+                        has_ancestors = True
+                        break
+            return has_ancestors
+
+        # check assignment expressions in comprehensions
+        search_all = search_all_comp_ancestors(namedexpr_test)
+        if search_all:
+            if self._normalizer.context.node.type == 'classdef':
+                message = 'assignment expression within a comprehension ' \
+                          'cannot be used in a class body'
+                self.add_issue(namedexpr_test, message=message)
+
+            namelist = [expr.value for expr in exprlist if expr.type == 'name']
+            if first.type == 'name' and first.value in namelist:
+                # [i := 0 for i, j in range(5)]
+                # [[(i := i) for j in range(5)] for i in range(5)]
+                # [i for i, j in range(5) if True or (i := 1)]
+                # [False and (i := 0) for i, j in range(5)]
+                message = 'assignment expression cannot rebind ' \
+                          'comprehension iteration variable %r' % first.value
+                self.add_issue(namedexpr_test, message=message)
+
+        self._check_assignment(first, is_namedexpr=True)
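
Note on the errors.py hunks above: the new _NamedExprRule is what the
changelog entry "Fix named expression error listing" refers to. A minimal
sketch of listing the new diagnostics through parso's public API (the same
load_grammar/iter_errors path the test suite's _get_error_list helper uses;
message texts follow the rule above):

    import parso

    grammar = parso.load_grammar(version='3.8')
    module = grammar.parse('[i := 0 for i, j in range(5)]')
    for issue in grammar.iter_errors(module):
        # reports the rebind of comprehension iteration variable 'i'
        print(issue.start_pos, issue.message)
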
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/python/grammar39.txt new/parso-0.5.2/parso/python/grammar39.txt
--- old/parso-0.5.1/parso/python/grammar39.txt  1970-01-01 01:00:00.000000000 +0100
+++ new/parso-0.5.2/parso/python/grammar39.txt  2019-12-15 01:01:11.000000000 +0100
@@ -0,0 +1,171 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: (NEWLINE | stmt)* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]])
+  | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  | '**' tfpdef [',']]] )
+|  (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]]
+  | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' test]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+           'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)*  ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+test_nocond: or_test | lambdef_nocond
+lambdef: 'lambda' [varargslist] ':' test
+lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test | star_expr)
+                   (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)*  [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' test_nocond [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/python/tokenize.py new/parso-0.5.2/parso/python/tokenize.py
--- old/parso-0.5.1/parso/python/tokenize.py    2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/python/tokenize.py    2019-12-15 01:01:11.000000000 +0100
@@ -314,17 +314,19 @@
 
 def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
     for fstring_stack_index, node in enumerate(fstring_stack):
-        if string.startswith(node.quote):
+        lstripped_string = string.lstrip()
+        len_lstrip = len(string) - len(lstripped_string)
+        if lstripped_string.startswith(node.quote):
             token = PythonToken(
                 FSTRING_END,
                 node.quote,
                 start_pos,
-                prefix=additional_prefix,
+                prefix=additional_prefix+string[:len_lstrip],
             )
             additional_prefix = ''
             assert not node.previous_lines
             del fstring_stack[fstring_stack_index:]
-            return token, '', len(node.quote)
+            return token, '', len(node.quote) + len_lstrip
     return None, additional_prefix, 0
 
 
@@ -482,8 +484,20 @@
                     yield fstring_end_token
                     continue
 
-            pseudomatch = pseudo_token.match(line, pos)
-            if not pseudomatch:                             # scan for tokens
+            # in an f-string, match until the end of the string
+            if fstring_stack:
+                string_line = line
+                for fstring_stack_node in fstring_stack:
+                    quote = fstring_stack_node.quote
+                    end_match = endpats[quote].match(line, pos)
+                    if end_match is not None:
+                        end_match_string = end_match.group(0)
+                        if len(end_match_string) - len(quote) + pos < len(string_line):
+                            string_line = line[:pos] + end_match_string[:-len(quote)]
+                pseudomatch = pseudo_token.match(string_line, pos)
+            else:
+                pseudomatch = pseudo_token.match(line, pos)
+            if not pseudomatch:  # scan for tokens
                 match = whitespace.match(line, pos)
                 if pos == 0:
                     for t in dedent_if_necessary(match.end()):
@@ -560,7 +574,12 @@
                 new_line = True
             elif initial == '#':  # Comments
                 assert not token.endswith("\n")
-                additional_prefix = prefix + token
+                if fstring_stack and fstring_stack[-1].is_in_expr():
+                    # `#` is not allowed in f-string expressions
+                    yield PythonToken(ERRORTOKEN, initial, spos, prefix)
+                    pos = start + 1
+                else:
+                    additional_prefix = prefix + token
             elif token in triple_quoted:
                 endprog = endpats[token]
                 endmatch = endprog.match(line, pos)
@@ -616,10 +635,13 @@
                     else:
                         if paren_level:
                             paren_level -= 1
-                elif token == ':' and fstring_stack \
+                elif token.startswith(':') and fstring_stack \
                         and fstring_stack[-1].parentheses_count \
                         - fstring_stack[-1].format_spec_count == 1:
+                    # `:` and `:=` both count
                     fstring_stack[-1].format_spec_count += 1
+                    token = ':'
+                    pos = start + 1
 
                 yield PythonToken(OP, token, spos, prefix)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso/python/tree.py new/parso-0.5.2/parso/python/tree.py
--- old/parso-0.5.1/parso/python/tree.py        2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/parso/python/tree.py        2019-12-15 01:01:11.000000000 +0100
@@ -200,25 +200,22 @@
         return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
                                    self.line, self.column)
 
-    def is_definition(self):
+    def is_definition(self, include_setitem=False):
         """
         Returns True if the name is being defined.
         """
-        return self.get_definition() is not None
+        return self.get_definition(include_setitem=include_setitem) is not None
 
-    def get_definition(self, import_name_always=False):
+    def get_definition(self, import_name_always=False, include_setitem=False):
         """
-        Returns None if there's on definition for a name.
+        Returns None if there's no definition for a name.
 
-        :param import_name_alway: Specifies if an import name is always a
+        :param import_name_always: Specifies if an import name is always a
             definition. Normally foo in `from foo import bar` is not a
             definition.
         """
         node = self.parent
         type_ = node.type
-        if type_ in ('power', 'atom_expr'):
-            # In `self.x = 3` self is not a definition, but x is.
-            return None
 
         if type_ in ('funcdef', 'classdef'):
             if self == node.name:
@@ -237,7 +234,7 @@
             if node.type == 'suite':
                 return None
             if node.type in _GET_DEFINITION_TYPES:
-                if self in node.get_defined_names():
+                if self in node.get_defined_names(include_setitem):
                     return node
                 if import_name_always and node.type in _IMPORTS:
                     return node
@@ -775,8 +772,8 @@
         """
         return self.children[3]
 
-    def get_defined_names(self):
-        return _defined_names(self.children[1])
+    def get_defined_names(self, include_setitem=False):
+        return _defined_names(self.children[1], include_setitem)
 
 
 class TryStmt(Flow):
@@ -799,7 +796,7 @@
     type = 'with_stmt'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the with statement defines. The
         defined names are set after `as`.
@@ -808,7 +805,7 @@
         for with_item in self.children[1:-2:2]:
             # Check with items for 'as' names.
             if with_item.type == 'with_item':
-                names += _defined_names(with_item.children[2])
+                names += _defined_names(with_item.children[2], include_setitem)
         return names
 
     def get_test_node_from_name(self, name):
@@ -849,7 +846,7 @@
     type = 'import_from'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the import defines. The
         defined names are set after `import` or in case an alias - `as` - is
@@ -920,7 +917,7 @@
     type = 'import_name'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the import defines. The defined names
         is always the first name after `import` or in case an alias - `as` - is
@@ -1021,7 +1018,7 @@
     __slots__ = ()
 
 
-def _defined_names(current):
+def _defined_names(current, include_setitem):
     """
     A helper function to find the defined names in statements, for loops and
     list comprehensions.
@@ -1029,14 +1026,22 @@
     names = []
     if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'):
         for child in current.children[::2]:
-            names += _defined_names(child)
+            names += _defined_names(child, include_setitem)
     elif current.type in ('atom', 'star_expr'):
-        names += _defined_names(current.children[1])
+        names += _defined_names(current.children[1], include_setitem)
     elif current.type in ('power', 'atom_expr'):
         if current.children[-2] != '**':  # Just if there's no operation
             trailer = current.children[-1]
             if trailer.children[0] == '.':
                 names.append(trailer.children[1])
+            elif trailer.children[0] == '[' and include_setitem:
+                for node in current.children[-2::-1]:
+                    if node.type == 'trailer':
+                        names.append(node.children[1])
+                        break
+                    if node.type == 'name':
+                        names.append(node)
+                        break
     else:
         names.append(current)
     return names
@@ -1046,18 +1051,18 @@
     type = 'expr_stmt'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns a list of `Name` defined before the `=` sign.
         """
         names = []
         if self.children[1].type == 'annassign':
-            names = _defined_names(self.children[0])
+            names = _defined_names(self.children[0], include_setitem)
         return [
             name
             for i in range(0, len(self.children) - 2, 2)
             if '=' in self.children[i + 1].value
-            for name in _defined_names(self.children[i])
+            for name in _defined_names(self.children[i], include_setitem)
         ] + names
 
     def get_rhs(self):
@@ -1150,7 +1155,7 @@
         else:
             return self._tfpdef()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         return [self.name]
 
     @property
@@ -1208,12 +1213,12 @@
     type = 'sync_comp_for'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the comprehension defines.
         """
         # allow async for
-        return _defined_names(self.children[1])
+        return _defined_names(self.children[1], include_setitem)
 
 
 # This is simply here so an older Jedi version can work with this new parso
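
Note on the tree.py hunks above: include_setitem defaults to False, so
existing callers see unchanged behavior; with the flag set, subscript
targets such as x[0] = ... are reported as definitions too. A minimal
sketch, mirroring the parametrized cases in test_parser_tree.py below:

    import parso

    module = parso.parse('x[0] = z', version='3.8')
    name = module.get_first_leaf()  # the Name leaf for x
    print(name.is_definition())                      # False, as in 0.5.1
    print(name.is_definition(include_setitem=True))  # True with the new flag
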
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso.egg-info/PKG-INFO new/parso-0.5.2/parso.egg-info/PKG-INFO
--- old/parso-0.5.1/parso.egg-info/PKG-INFO     2019-07-13 15:50:20.000000000 +0200
+++ new/parso-0.5.2/parso.egg-info/PKG-INFO     2019-12-15 01:01:27.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parso
-Version: 0.5.1
+Version: 0.5.2
 Summary: A Python Parser
 Home-page: https://github.com/davidhalter/parso
 Author: David Halter
@@ -106,6 +106,13 @@
         Changelog
         ---------
         
+        0.5.2 (2019-12-15)
+        ++++++++++++++++++
+        
+        - Add include_setitem to get_definition/is_definition and get_defined_names (#66)
+        - Fix named expression error listing (#89, #90)
+        - Fix some f-string tokenizer issues (#93)
+        
         0.5.1 (2019-07-13)
         ++++++++++++++++++
         
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/parso.egg-info/SOURCES.txt new/parso-0.5.2/parso.egg-info/SOURCES.txt
--- old/parso-0.5.1/parso.egg-info/SOURCES.txt  2019-07-13 15:50:20.000000000 +0200
+++ new/parso-0.5.2/parso.egg-info/SOURCES.txt  2019-07-13 15:50:20.000000000 +0200
@@ -57,6 +57,7 @@
 parso/python/grammar36.txt
 parso/python/grammar37.txt
 parso/python/grammar38.txt
+parso/python/grammar39.txt
 parso/python/parser.py
 parso/python/pep8.py
 parso/python/prefix.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/test/failing_examples.py new/parso-0.5.2/test/failing_examples.py
--- old/parso-0.5.1/test/failing_examples.py    2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/test/failing_examples.py    2019-12-15 01:01:11.000000000 +0100
@@ -319,3 +319,35 @@
                     continue
             '''),  # 'continue' not supported inside 'finally' clause"
     ]
+
+if sys.version_info[:2] >= (3, 8):
+    # assignment expressions from issue#89
+    FAILING_EXAMPLES += [
+        # Case 2
+        '(lambda: x := 1)',
+        '((lambda: x) := 1)',
+        # Case 3
+        '(a[i] := x)',
+        '((a[i]) := x)',
+        '(a(i) := x)',
+        # Case 4
+        '(a.b := c)',
+        '[(i.i:= 0) for ((i), j) in range(5)]',
+        # Case 5
+        '[i:= 0 for i, j in range(5)]',
+        '[(i:= 0) for ((i), j) in range(5)]',
+        '[(i:= 0) for ((i), j), in range(5)]',
+        '[(i:= 0) for ((i), j.i), in range(5)]',
+        '[[(i:= i) for j in range(5)] for i in range(5)]',
+        '[i for i, j in range(5) if True or (i:= 1)]',
+        '[False and (i:= 0) for i, j in range(5)]',
+        # Case 6
+        '[i+1 for i in (i:= range(5))]',
+        '[i+1 for i in (j:= range(5))]',
+        '[i+1 for i in (lambda: (j:= range(5)))()]',
+        # Case 7
+        'class Example:\n [(j := i) for i in range(5)]',
+        # Not in that issue
+        '(await a := x)',
+        '((await a) := x)',
+    ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/test/test_parser_tree.py new/parso-0.5.2/test/test_parser_tree.py
--- old/parso-0.5.1/test/test_parser_tree.py    2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/test/test_parser_tree.py    2019-12-15 01:01:11.000000000 +0100
@@ -180,3 +180,45 @@
 
     r = get_raise_stmts(code, 2) #  Lists inside try-catch
     assert len(list(r)) == 2
+
+
+@pytest.mark.parametrize(
+    'code, name_index, is_definition, include_setitem', [
+        ('x = 3', 0, True, False),
+        ('x.y = 3', 0, False, False),
+        ('x.y = 3', 1, True, False),
+        ('x.y = u.v = z', 0, False, False),
+        ('x.y = u.v = z', 1, True, False),
+        ('x.y = u.v = z', 2, False, False),
+        ('x.y = u.v, w = z', 3, True, False),
+        ('x.y = u.v, w = z', 4, True, False),
+        ('x.y = u.v, w = z', 5, False, False),
+
+        ('x, y = z', 0, True, False),
+        ('x, y = z', 1, True, False),
+        ('x, y = z', 2, False, False),
+        ('x, y = z', 2, False, False),
+        ('x[0], y = z', 2, False, False),
+        ('x[0] = z', 0, False, False),
+        ('x[0], y = z', 0, False, False),
+        ('x[0], y = z', 2, False, True),
+        ('x[0] = z', 0, True, True),
+        ('x[0], y = z', 0, True, True),
+        ('x: int = z', 0, True, False),
+        ('x: int = z', 1, False, False),
+        ('x: int = z', 2, False, False),
+        ('x: int', 0, True, False),
+        ('x: int', 1, False, False),
+    ]
+)
+def test_is_definition(code, name_index, is_definition, include_setitem):
+    module = parse(code, version='3.8')
+    name = module.get_first_leaf()
+    while True:
+        if name.type == 'name':
+            if name_index == 0:
+                break
+            name_index -= 1
+        name = name.get_next_leaf()
+
+    assert name.is_definition(include_setitem=include_setitem) == is_definition
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/test/test_pgen2.py new/parso-0.5.2/test/test_pgen2.py
--- old/parso-0.5.1/test/test_pgen2.py  2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/test/test_pgen2.py  2019-12-15 01:01:11.000000000 +0100
@@ -292,12 +292,22 @@
         generate_grammar('foo: foo NAME\n', tokenize.PythonTokenTypes)
 
 
-def test_ambiguities():
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('foo: bar | baz\nbar: NAME\nbaz: NAME\n', tokenize.PythonTokenTypes)
-
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''', tokenize.PythonTokenTypes)
-
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('''foo: bar | 'x'\nbar: 'x'\n''', tokenize.PythonTokenTypes)
+@pytest.mark.parametrize(
+    'grammar, error_match', [
+        ['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
+         r"foo is ambiguous.*given a TokenType\(NAME\).*bar or baz"],
+        ['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
+         r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
+        ['''foo: bar | 'x'\nbar: 'x'\n''',
+         r"foo is ambiguous.*given a ReservedString\(x\).*bar or foo"],
+        # An ambiguity with the second (not the first) child of a production
+        ['outer: "a" [inner] "b" "c"\ninner: "b" "c" [inner]\n',
+         r"outer is ambiguous.*given a ReservedString\(b\).*inner or outer"],
+        # An ambiguity hidden by a level of indirection (middle)
+        ['outer: "a" [middle] "b" "c"\nmiddle: inner\ninner: "b" "c" 
[inner]\n',
+         r"outer is ambiguous.*given a ReservedString\(b\).*middle or outer"],
+    ]
+)
+def test_ambiguities(grammar, error_match):
+    with pytest.raises(ValueError, match=error_match):
+        generate_grammar(grammar, tokenize.PythonTokenTypes)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/test/test_python_errors.py new/parso-0.5.2/test/test_python_errors.py
--- old/parso-0.5.1/test/test_python_errors.py  2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/test/test_python_errors.py  2019-12-15 01:01:11.000000000 +0100
@@ -294,6 +294,19 @@
 
 
 @pytest.mark.parametrize(
+    'code', [
+        'a = (b := 1)',
+        '[x4 := x ** 5 for x in range(7)]',
+        '[total := total + v for v in range(10)]',
+        'while chunk := file.read(2):\n pass',
+        'numbers = [y := math.factorial(x), y**2, y**3]',
+    ]
+)
+def test_valid_namedexpr(code):
+    assert not _get_error_list(code, version='3.8')
+
+
+@pytest.mark.parametrize(
     ('code', 'message'), [
         ("f'{1+}'", ('invalid syntax')),
         (r'f"\"', ('invalid syntax')),
@@ -307,3 +320,15 @@
     """
     error, = _get_error_list(code, version='3.6')
     assert message in error.message
+
+
+@pytest.mark.parametrize(
+    'code', [
+        "from foo import (\nbar,\n rab,\n)",
+        "from foo import (bar, rab, )",
+    ]
+)
+def test_trailing_comma(code):
+    errors = _get_error_list(code)
+    assert not errors
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/test/test_tokenize.py new/parso-0.5.2/test/test_tokenize.py
--- old/parso-0.5.1/test/test_tokenize.py       2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/test/test_tokenize.py       2019-12-15 01:01:11.000000000 +0100
@@ -385,8 +385,32 @@
             NAME, OP, FSTRING_START, FSTRING_STRING, OP, NAME, OP,
             FSTRING_STRING, OP, FSTRING_STRING, OP, NAME, OP, FSTRING_END, OP
         ]),
+        # issue #86, a string-like in an f-string expression
+        ('f"{ ""}"', [
+            FSTRING_START, OP, FSTRING_END, STRING
+        ]),
+        ('f"{ f""}"', [
+            FSTRING_START, OP, NAME, FSTRING_END, STRING
+        ]),
     ]
 )
 def test_fstring(code, types, version_ge_py36):
     actual_types = [t.type for t in _get_token_list(code, version_ge_py36)]
     assert types + [ENDMARKER] == actual_types
+
+
+@pytest.mark.parametrize(
+    ('code', 'types'), [
+        # issue #87, `:=` in the outest paratheses should be tokenized
+        # as a format spec marker and part of the format
+        ('f"{x:=10}"', [
+            FSTRING_START, OP, NAME, OP, FSTRING_STRING, OP, FSTRING_END
+        ]),
+        ('f"{(x:=10)}"', [
+            FSTRING_START, OP, OP, NAME, OP, NUMBER, OP, OP, FSTRING_END
+        ]),
+    ]
+)
+def test_fstring_assignment_expression(code, types, version_ge_py38):
+    actual_types = [t.type for t in _get_token_list(code, version_ge_py38)]
+    assert types + [ENDMARKER] == actual_types
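
Note on the tokenize.py and test_tokenize.py hunks above: with the fix, a
:= at the top level of an f-string expression is split so that ':' opens
the format spec and '=10' becomes FSTRING_STRING content instead of a
walrus operator. A minimal sketch using the internal tokenizer, as the new
tests do; tokenize() and parse_version_string() are internal helpers, so
treat this as illustrative only:

    from parso.python.tokenize import tokenize
    from parso.utils import parse_version_string

    # Expected token types, per the test above:
    # FSTRING_START, OP, NAME, OP, FSTRING_STRING, OP, FSTRING_END
    for token in tokenize('f"{x:=10}"', parse_version_string('3.8')):
        print(token.type, repr(token.string))
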
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/parso-0.5.1/tox.ini new/parso-0.5.2/tox.ini
--- old/parso-0.5.1/tox.ini     2019-07-13 15:47:10.000000000 +0200
+++ new/parso-0.5.2/tox.ini     2019-12-15 01:01:11.000000000 +0100
@@ -1,10 +1,10 @@
 [tox]
-envlist = {py26,py27,py33,py34,py35,py36,py37}
+envlist = {py26,py27,py33,py34,py35,py36,py37,py38}
 [testenv]
 extras = testing
 deps =
     py26,py33: pytest>=3.0.7,<3.3
-    py27,py34: pytest<5
+    py27,py34: pytest<3.3
     py26,py33: setuptools<37
     coverage: coverage
 setenv =

