Reviewers: marja,
Message:
Committed patchset #1 manually as r17556 (presubmit successful).
Description:
Experimental parser: some cleanup and assertions for rule_parser
[email protected]
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=17556
Please review this at https://codereview.chromium.org/64213002/
SVN Base: https://v8.googlecode.com/svn/branches/experimental/parser
Affected files (+27, -14 lines):
M tools/lexer_generator/action_test.py
M tools/lexer_generator/generator.py
M tools/lexer_generator/regex_parser.py
M tools/lexer_generator/rule_parser.py
M tools/lexer_generator/rule_parser_test.py
Index: tools/lexer_generator/action_test.py
diff --git a/tools/lexer_generator/action_test.py b/tools/lexer_generator/action_test.py
index bb80100f83e8f49a005a00d91a8cb91c362f3b3f..84f659b8fb22dd1a6709afd1a08ac950cfd816a4 100644
--- a/tools/lexer_generator/action_test.py
+++ b/tools/lexer_generator/action_test.py
@@ -39,8 +39,8 @@ def process_rules(parser_state):
builder = NfaBuilder()
for k, v in parser_state.rules.items():
graphs = []
- for (graph, precedence, code, condition) in v['regex']:
- graphs.append(NfaBuilder.add_action(graph, (precedence, code, condition)))
+ for (graph, action) in v['regex']:
+ graphs.append(NfaBuilder.add_action(graph, action))
nfa = builder.nfa(NfaBuilder.or_graphs(graphs))
dfa = dfa_from_nfa(nfa)
rule_map[k] = (nfa, dfa)
Index: tools/lexer_generator/generator.py
diff --git a/tools/lexer_generator/generator.py b/tools/lexer_generator/generator.py
index a257568a8ec10b194387bdcd29298f9b4b8a6200..035b5a746294691dae525be36db6f8fa753c0477 100644
--- a/tools/lexer_generator/generator.py
+++ b/tools/lexer_generator/generator.py
@@ -79,13 +79,16 @@ def process_rules(parser_state):
rule_map = {}
builder = NfaBuilder()
builder.set_character_classes(parser_state.character_classes)
+ assert 'default' in parser_state.rules
for k, v in parser_state.rules.items():
+ assert 'default' in v
graphs = []
- for (graph, precedence, code, action) in v['regex']:
- graphs.append(NfaBuilder.add_action(graph, (precedence, code, action)))
- rule_map[k] = builder.nfa(NfaBuilder.or_graphs(graphs))
+ for (graph, action) in v['regex']:
+ graphs.append(NfaBuilder.add_action(graph, action))
+ rule_map[k] = NfaBuilder.or_graphs(graphs)
html_data = []
- for rule_name, nfa in rule_map.items():
+ for rule_name, graph in rule_map.items():
+ nfa = builder.nfa(graph)
(start, dfa_nodes) = nfa.compute_dfa()
dfa = Dfa(start, dfa_nodes)
html_data.append((rule_name, nfa, dfa))
Index: tools/lexer_generator/regex_parser.py
diff --git a/tools/lexer_generator/regex_parser.py b/tools/lexer_generator/regex_parser.py
index c508cb35f5ab636ac23ee97c9adb10306c9e8fa9..ed9d4ddd74219fdb7fec12a8743c8c5f34ab28c4 100644
--- a/tools/lexer_generator/regex_parser.py
+++ b/tools/lexer_generator/regex_parser.py
@@ -158,6 +158,6 @@ class RegexParser:
RegexParser.__static_instance = parser
try:
return parser.parser.parse(data, lexer=parser.lexer.lexer)
- except Exception as e:
+ except Exception:
RegexParser.__static_instance = None
- raise e
+ raise
Index: tools/lexer_generator/rule_parser.py
diff --git a/tools/lexer_generator/rule_parser.py b/tools/lexer_generator/rule_parser.py
index a616fec33d1c2faa49c833e85cf83b94b11f5003..ebdd4fa556b4d5ce79ad96f5cae54e3d8ff13837 100644
--- a/tools/lexer_generator/rule_parser.py
+++ b/tools/lexer_generator/rule_parser.py
@@ -40,6 +40,7 @@ class RuleParserState:
self.character_classes = {}
self.current_state = None
self.rules = {}
+ self.transitions = set()
def parse(self, string):
return RuleParser.parse(string, self)
@@ -48,6 +49,8 @@ class RuleParser:
tokens = RuleLexer.tokens
__rule_precedence_counter = 0
+ __keyword_transitions = set([
+ 'continue', 'break', 'terminate', 'terminate_illegal'])
def __init__(self):
self.__state = None
@@ -95,9 +98,15 @@ class RuleParser:
'''transition_rule : composite_regex_or_default code action
| composite_regex_or_default empty action
| composite_regex_or_default code empty'''
- rules = self.__state.rules[self.__state.current_state]
- rule = (p[1], RuleParser.__rule_precedence_counter, p[2], p[3])
+ transition = p[3] if p[3] else 'continue'
+ if transition == 'continue' and self.__state.current_state == 'default':
+ transition = 'break'
+ if not transition in self.__keyword_transitions:
+ assert not transition == 'default'
+ self.__state.transitions.add(transition)
+ rule = (p[1], (RuleParser.__rule_precedence_counter, p[2], transition))
RuleParser.__rule_precedence_counter += 1
+ rules = self.__state.rules[self.__state.current_state]
if p[1] == 'default':
assert not rules['default']
rules['default'] = rule
@@ -197,7 +206,8 @@ class RuleParser:
parser.__state = parser_state
try:
parser.parser.parse(data, lexer=parser.lexer.lexer)
- except Exception as e:
+ except Exception:
RuleParser.__static_instance = None
- raise e
+ raise
+ assert parser_state.transitions <= set(parser_state.rules.keys())
parser.__state = None
Index: tools/lexer_generator/rule_parser_test.py
diff --git a/tools/lexer_generator/rule_parser_test.py b/tools/lexer_generator/rule_parser_test.py
index f0bf6df1d355544f7be24b571a140c6f9b9845b9..9a904395f9d22e92552bb3362271d3da593b8623 100644
--- a/tools/lexer_generator/rule_parser_test.py
+++ b/tools/lexer_generator/rule_parser_test.py
@@ -44,8 +44,8 @@ alias = /regex/;
<cond1> alias <<cond2>>
<cond2> /regex/ {body}
<cond2> alias {body}
-<cond3> /regex/ {body} <<cond4>>
-<cond3> alias {body} <<cond4>>''')
+<cond3> /regex/ {body} <<cond1>>
+<cond3> alias {body} <<cond1>>''')
self.assertTrue(len(self.state.aliases), 1)
self.assertTrue('alias' in self.state.aliases)
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.