Reviewers: marja,

Message:
Committed patchset #1 manually as r17483 (presubmit successful).

Description:
Experimental parser: parsing regex subexpressions
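
The rule lexer now produces separate STRING and REGEX tokens: quoted strings ("...") stay literal, while regex subexpressions are written between slashes (/.../) and, like character classes and aliases, are handed to RegexParser by the rule parser. Rules in lexer_py.re that encoded real regexes inside quoted strings, e.g. <Normal> "\\u[0-9a-fA-F]{4}", are rewritten in the slash form, <Normal> /\\u[0-9a-fA-F]{4}/.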

[email protected]

BUG=

Committed: https://code.google.com/p/v8/source/detail?r=17483

Please review this at https://codereview.chromium.org/59603003/

SVN Base: https://v8.googlecode.com/svn/branches/experimental/parser

Affected files (+56, -29 lines):
  M src/lexer/lexer_py.re
  M tools/lexer_generator/automata_test.py
  M tools/lexer_generator/regex_lexer.py
  M tools/lexer_generator/rule_lexer.py
  M tools/lexer_generator/rule_parser.py


Index: src/lexer/lexer_py.re
diff --git a/src/lexer/lexer_py.re b/src/lexer/lexer_py.re
index f7cd6c0ed30e0e8ff71fce8dafd238db24a1e7c2..17d5c3a6e58f571adcb7797b25bf6d0f89010a6d 100644
--- a/src/lexer/lexer_py.re
+++ b/src/lexer/lexer_py.re
@@ -25,7 +25,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-whitespace_char = [ \t\v\f\r\240:ws:];
+whitespace_char = [ \t\v\f\r:ws:]; # TODO put back \240
 whitespace = whitespace_char+;
 identifier_start = [$_a-zA-Z:lit:];
 identifier_char = [$_a-zA-Z0-9:lit:];
@@ -33,7 +33,7 @@ not_identifier_char = [^:identifier_char:];
 line_terminator = [\n\r]+;
 digit = [0-9];
 hex_digit = [0-9a-fA-F];
-maybe_exponent = ("e" [-+]? digit+)?;
+maybe_exponent = ("e" [\-+]? digit+)?;
number = ("0x" hex_digit+) | (("." digit+ maybe_exponent) | (digit+ ("." digit*)? maybe_exponent));

<Normal> "break" not_identifier_char { PUSH_TOKEN_LOOKAHEAD(Token::BREAK); } @@ -151,7 +151,7 @@ number = ("0x" hex_digit+) | (("." digit+ maybe_exponent) | (digit+ ("." digit*)
 <Normal> "'"           :=> SingleQuoteString

 <Normal> identifier_start     :=> Identifier
-<Normal> "\\u[0-9a-fA-F]{4}" { if (ValidIdentifierStart()) { YYSETCONDITION(kConditionIdentifier); goto yyc_Identifier; } send(Token::ILLEGAL); start_ = cursor_; goto yyc_Normal; } +<Normal> /\\u[0-9a-fA-F]{4}/ { if (ValidIdentifierStart()) { YYSETCONDITION(kConditionIdentifier); goto yyc_Identifier; } send(Token::ILLEGAL); start_ = cursor_; goto yyc_Normal; }
 <Normal> "\\"                 { PUSH_TOKEN(Token::ILLEGAL); }

 <Normal> eof           { PUSH_EOF_AND_RETURN();}
@@ -160,25 +160,25 @@ number = ("0x" hex_digit+) | (("." digit+ maybe_exponent) | (digit+ ("." digit*)
 <DoubleQuoteString> "\\\\"  { goto yyc_DoubleQuoteString; }
 <DoubleQuoteString> "\\\""  { goto yyc_DoubleQuoteString; }
 <DoubleQuoteString> "\""     { PUSH_TOKEN(Token::STRING);}
-<DoubleQuoteString> "\\\n\r?" { goto yyc_DoubleQuoteString; }
-<DoubleQuoteString> "\\\r\n?" { goto yyc_DoubleQuoteString; }
+<DoubleQuoteString> /\\\n\r?/ { goto yyc_DoubleQuoteString; }
+<DoubleQuoteString> /\\\r\n?/ { goto yyc_DoubleQuoteString; }
<DoubleQuoteString> "\n" => Normal { PUSH_TOKEN_LOOKAHEAD(Token::ILLEGAL); } <DoubleQuoteString> "\r" => Normal { PUSH_TOKEN_LOOKAHEAD(Token::ILLEGAL); }
 <DoubleQuoteString> eof     { TERMINATE_ILLEGAL(); }
 <DoubleQuoteString> any     { goto yyc_DoubleQuoteString; }

-<SingleQuoteString> "\\\\"  { goto yyc_SingleQuoteString; }
+<SingleQuoteString> "\\"  { goto yyc_SingleQuoteString; }
 <SingleQuoteString> "\\'"   { goto yyc_SingleQuoteString; }
-<SingleQuoteString> "'"     { PUSH_TOKEN(Token::STRING);}
-<SingleQuoteString> "\\\n\r?" { goto yyc_SingleQuoteString; }
-<SingleQuoteString> "\\\r\n?" { goto yyc_SingleQuoteString; }
+<SingleQuoteString> "'"     { PUSH_TOKEN(Token::STRING); }
+<SingleQuoteString> /\\\n\r?/ { goto yyc_SingleQuoteString; }
+<SingleQuoteString> /\\\r\n?/ { goto yyc_SingleQuoteString; }
<SingleQuoteString> "\n" => Normal { PUSH_TOKEN_LOOKAHEAD(Token::ILLEGAL); } <SingleQuoteString> "\r" => Normal { PUSH_TOKEN_LOOKAHEAD(Token::ILLEGAL); }
 <SingleQuoteString> eof     { TERMINATE_ILLEGAL(); }
 <SingleQuoteString> any     { goto yyc_SingleQuoteString; }

 <Identifier> identifier_char+  { goto yyc_Identifier; }
-<Identifier> "\\u[0-9a-fA-F]{4}" { if (ValidIdentifierPart()) { goto yyc_Identifier; } YYSETCONDITION(kConditionNormal); send(Token::ILLEGAL); start_ = cursor_; goto yyc_Normal; } +<Identifier> /\\u[0-9a-fA-F]{4}/ { if (ValidIdentifierPart()) { goto yyc_Identifier; } YYSETCONDITION(kConditionNormal); send(Token::ILLEGAL); start_ = cursor_; goto yyc_Normal; }
 <Identifier> "\\"              { PUSH_TOKEN(Token::ILLEGAL); }
 <Identifier> any               { PUSH_TOKEN_LOOKAHEAD(Token::IDENTIFIER); }

@@ -186,11 +186,11 @@ number = ("0x" hex_digit+) | (("." digit+ maybe_exponent) | (digit+ ("." digit*)
 <SingleLineComment> eof { start_ = cursor_ - 1; PUSH_TOKEN(Token::EOS); }
 <SingleLineComment> any             { goto yyc_SingleLineComment; }

-<MultiLineComment> "*//"  { PUSH_LINE_TERMINATOR();}
+<MultiLineComment> "*/"  { PUSH_LINE_TERMINATOR();}
 <MultiLineComment> eof { start_ = cursor_ - 1; PUSH_TOKEN(Token::EOS); }
 <MultiLineComment> any      { goto yyc_MultiLineComment; }

-<HtmlComment> eof        { start_ = cursor_ - 1; PUSH_TOKEN(Token::EOS); }
 <HtmlComment> "-->"      { PUSH_LINE_TERMINATOR();}
 <HtmlComment> line_terminator+ { PUSH_LINE_TERMINATOR();}
+<HtmlComment> eof        { start_ = cursor_ - 1; PUSH_TOKEN(Token::EOS); }
 <HtmlComment> any        { goto yyc_HtmlComment; }
Index: tools/lexer_generator/automata_test.py
diff --git a/tools/lexer_generator/automata_test.py b/tools/lexer_generator/automata_test.py
index 3e80db6247a50d8039bd9e4071a5519a7ff8aa70..c5bbb0bd26c2fc7a0fc75ac2ee448e2063dcf7f9 100644
--- a/tools/lexer_generator/automata_test.py
+++ b/tools/lexer_generator/automata_test.py
@@ -54,6 +54,7 @@ class AutomataTestCase(unittest.TestCase):
       ("a.?b", ["aab", "abb", "acb", "ab"], ["aaab", ""]),
       ("a.+b", ["aab", "abb", "acb"], ["aaac", "ab", ""]),
       (".|.", ["a", "b"], ["aa", ""]),
+      ("//.", ["//a"], ["aa", ""]),
     ]

     def test_matches(self):
@@ -84,9 +85,9 @@ class AutomataTestCase(unittest.TestCase):
       dfa = dfa_from_nfa(nfa)
       def verify(string, expected):
         actions = list(dfa.collect_actions(string))
-        assertEqual(len(expected), len(actions))
+        self.assertEqual(len(expected), len(actions))
         for i, action in enumerate(actions):
-          assertEqual(action[i], expected[i])
+          self.assertEqual(action[i], expected[i])
       def verify_miss(string, expected):
         verify(string, expected + [('MISS',)])
       def verify_hit(string, expected):
Index: tools/lexer_generator/regex_lexer.py
diff --git a/tools/lexer_generator/regex_lexer.py b/tools/lexer_generator/regex_lexer.py
index 26c44870b807d2c68282ccda9fc59516206647ce..bd25b7b24f3d14428a55e97f57ad2287fd698d46 100644
--- a/tools/lexer_generator/regex_lexer.py
+++ b/tools/lexer_generator/regex_lexer.py
@@ -85,15 +85,15 @@ class RegexLexer:

   t_class_RANGE = '-'
   t_class_NOT = '\^'
-  t_class_CHARACTER_CLASS = ':ws:|:lit:'
+  t_class_CHARACTER_CLASS = r':\w+:'

   def t_class_ESCAPED_CLASS_LITERAL(self, t):
-    r'\\\^|\\-|\\\[|\\\]\\:'
+    r'\\\^|\\-|\\\[|\\\]|\\\:|\\\w'
     t.type = 'CLASS_LITERAL'
     t.value = t.value[1:]
     return t

-  t_class_CLASS_LITERAL = r'[a-zA-Z0-9]' # fix this
+  t_class_CLASS_LITERAL = r'[\w $_:+]' # fix this

   t_ANY_ignore  = '\n'

Index: tools/lexer_generator/rule_lexer.py
diff --git a/tools/lexer_generator/rule_lexer.py b/tools/lexer_generator/rule_lexer.py
index 05bf03a0d5201b248fac3ea8a6eb2a5c43de3ef2..ae5c8bb57dfb745c48eb9fd2794db7a5b22134d8 100644
--- a/tools/lexer_generator/rule_lexer.py
+++ b/tools/lexer_generator/rule_lexer.py
@@ -31,7 +31,8 @@ class RuleLexer:

   tokens = (
     'IDENTIFIER',
-    'STRING_REGEX',
+    'STRING',
+    'REGEX',
     'CHARACTER_CLASS_REGEX',
     'TRANSITION',
     'TRANSITION_WITH_CODE',
@@ -65,7 +66,8 @@ class RuleLexer:
     pass

   t_IDENTIFIER = r'[a-zA-Z0-9_]+'
-  t_STRING_REGEX = r'"((\\("|\w|\\))|[^\\"])+"'
+  t_STRING = r'"((\\("|\w|\\))|[^\\"])+"'
+  t_REGEX = r'/[^\/]+/'
   t_CHARACTER_CLASS_REGEX = r'\[([^\]]|\\\])+\]'
   t_TRANSITION = r':=>'
   t_TRANSITION_WITH_CODE = r'=>'
Index: tools/lexer_generator/rule_parser.py
diff --git a/tools/lexer_generator/rule_parser.py b/tools/lexer_generator/rule_parser.py
index 46cce1378c01a6710728f89f464f02cec7857a70..61e259a70a5cac2d759a0bd75360f20091c48645 100644
--- a/tools/lexer_generator/rule_parser.py
+++ b/tools/lexer_generator/rule_parser.py
@@ -27,13 +27,17 @@

 import ply.yacc as yacc
 from rule_lexer import RuleLexer
+from regex_parser import RegexParser

 class RuleParser:

   tokens = RuleLexer.tokens

   def __init__(self):
-    self.aliases = {}
+    self.aliases = {
+      'eof' : "eof rule",
+      'any' : "any rule",
+    }
     self.current_transition = None
     self.rules = {}

@@ -80,28 +84,48 @@ class RuleParser:
     p[0] = self.current_transition

   def p_composite_regex(self, p):
-    '''composite_regex : regex_part OR regex_part maybe_regex_parts
-                       | regex_part maybe_regex_parts'''
+    '''composite_regex : regex_parts OR regex_parts
+                       | regex_parts'''
     if p[len(p)-1]:
       p[0] = p[1:]
     else:
       p[0] = p[1:-1]

-  def p_maybe_regex_part(self, p):
-    '''maybe_regex_parts : composite_regex
-                         | empty'''
-    p[0] = p[1]
+  def p_regex_parts(self, p):
+    '''regex_parts : regex_part
+                   | regex_part regex_parts'''
+    p[0] = p[1:]

   def p_regex_part(self, p):
     '''regex_part : LEFT_PARENTHESIS composite_regex RIGHT_PARENTHESIS modifier
-                  | STRING_REGEX modifier
-                  | CHARACTER_CLASS_REGEX modifier
-                  | IDENTIFIER modifier'''
+                  | regex_string_literal modifier
+                  | regex_class modifier
+                  | regex modifier
+                  | regex_alias modifier'''
     if p[len(p)-1]:
       p[0] = p[1:]
     else:
       p[0] = p[1:-1]

+  def p_regex_string_literal(self, p):
+    'regex_string_literal : STRING'
+    string = p[1][1:-1]
+    for c in "\+?|*[]()":
+      string = string.replace(c, "\\" + c)
+    p[0] = RegexParser.parse(string)
+
+  def p_regex(self, p):
+    'regex : REGEX'
+    p[0] = RegexParser.parse(p[1][1:-1])
+
+  def p_regex_class(self, p):
+    'regex_class : CHARACTER_CLASS_REGEX'
+    p[0] = RegexParser.parse(p[1])
+
+  def p_regex_alias(self, p):
+    'regex_alias : IDENTIFIER'
+    p[0] = self.aliases[p[1]]
+
   def p_modifier(self, p):
     '''modifier : PLUS
                 | QUESTION_MARK
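
For reference, a minimal standalone PLY sketch of the STRING/REGEX token split introduced in rule_lexer.py; the module, sample input and printout here are illustrative assumptions, not code from this patch:

  # Sketch: tokenize quoted strings and /.../ regex subexpressions with PLY.
  import ply.lex as lex

  tokens = ('IDENTIFIER', 'STRING', 'REGEX')

  t_IDENTIFIER = r'[a-zA-Z0-9_]+'
  t_STRING = r'"((\\("|\w|\\))|[^\\"])+"'  # quoted literal, as in rule_lexer.py
  t_REGEX = r'/[^\/]+/'                    # regex subexpression between slashes
  t_ignore = ' \t'

  def t_error(t):
      # Skip characters this sketch does not model.
      t.lexer.skip(1)

  lexer = lex.lex()
  lexer.input(r'identifier_start /\\u[0-9a-fA-F]{4}/ "break"')
  for tok in lexer:
      print(tok.type, tok.value)

A string literal such as "break" is still matched verbatim, while /\\u[0-9a-fA-F]{4}/ surfaces as a REGEX token whose body the rule parser can hand to RegexParser.parse.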

