Revision: 17936
Author:   [email protected]
Date:     Wed Nov 20 16:10:09 2013 UTC
Log: Experimental lexer generator: generate code for utf-16 character classes.

[email protected]
[email protected]
BUG=

Review URL: https://codereview.chromium.org/78713002
http://code.google.com/p/v8/source/detail?r=17936

Modified:
 /branches/experimental/parser/src/char-predicates.h
 /branches/experimental/parser/src/lexer/lexer.gyp
 /branches/experimental/parser/src/lexer/lexer_py.re
 /branches/experimental/parser/src/scanner.h
 /branches/experimental/parser/tools/lexer_generator/code_generator.jinja
 /branches/experimental/parser/tools/lexer_generator/code_generator.py
 /branches/experimental/parser/tools/lexer_generator/generator.py
 /branches/experimental/parser/tools/lexer_generator/transition_key_test.py
 /branches/experimental/parser/tools/lexer_generator/transition_keys.py

=======================================
--- /branches/experimental/parser/src/char-predicates.h Fri Jul 19 09:57:35 2013 UTC
+++ /branches/experimental/parser/src/char-predicates.h Wed Nov 20 16:10:09 2013 UTC
@@ -66,6 +66,16 @@
   }
 };

+struct IdentifierPartNotLetter {
+  static inline bool Is(uc32 c) {
+    return unibrow::Number::Is(c)
+        || c == 0x200C  // U+200C is Zero-Width Non-Joiner.
+        || c == 0x200D  // U+200D is Zero-Width Joiner.
+        || unibrow::CombiningMark::Is(c)
+        || unibrow::ConnectorPunctuation::Is(c);
+  }
+};
+
 } }  // namespace v8::internal

 #endif  // V8_CHAR_PREDICATES_H_
=======================================
--- /branches/experimental/parser/src/lexer/lexer.gyp Wed Nov 20 13:56:20 2013 UTC
+++ /branches/experimental/parser/src/lexer/lexer.gyp Wed Nov 20 16:10:09 2013 UTC
@@ -74,7 +74,7 @@
             '../../tools/lexer_generator/generator.py',
             '--re=../../src/lexer/lexer_py.re',
             '--code=<(SHARED_INTERMEDIATE_DIR)/generated_lexer_latin1.cc',
-            '--char-type=uint8_t',
+            '--encoding=latin1',
           ],
         },
         {
@@ -92,7 +92,7 @@
             '../../tools/lexer_generator/generator.py',
             '--re=../../src/lexer/lexer_py.re',
             '--code=<(SHARED_INTERMEDIATE_DIR)/generated_lexer_utf16.cc',
-            '--char-type=uint16_t',
+            '--encoding=utf16',
           ],
         },
       ],
=======================================
--- /branches/experimental/parser/src/lexer/lexer_py.re Tue Nov 19 11:07:13 2013 UTC
+++ /branches/experimental/parser/src/lexer/lexer_py.re Wed Nov 20 16:10:09 2013 UTC
@@ -25,10 +25,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-whitespace_char = [ \t\v\f\r:ws:\240];
+whitespace_char = [ \t\v\f\r:whitespace:\240];
 whitespace = whitespace_char+;
-identifier_start = [$_a-zA-Z:lit:];
-identifier_char = [0-9:identifier_start:];
+identifier_start = [$_a-zA-Z:letter:];
+identifier_char = [0-9:identifier_part_not_letter::identifier_start:];
 line_terminator = [\n\r];
 digit = [0-9];
 hex_digit = [0-9a-fA-F];
=======================================
--- /branches/experimental/parser/src/scanner.h Thu Oct 10 11:58:16 2013 UTC
+++ /branches/experimental/parser/src/scanner.h Wed Nov 20 16:10:09 2013 UTC
@@ -139,12 +139,18 @@
  bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
  bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
   bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+  bool IsLetter(unibrow::uchar c) { return kIsLetter.get(c); }
+  bool IsIdentifierPartNotLetter(unibrow::uchar c) {
+    return kIsIdentifierPartNotLetter.get(c);
+  }

  private:
   unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
   unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
   unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
   unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+  unibrow::Predicate<unibrow::Letter, 128> kIsLetter;
+  unibrow::Predicate<IdentifierPartNotLetter, 128> kIsIdentifierPartNotLetter;
   StaticResource<Utf8Decoder> utf8_decoder_;

   DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
=======================================
--- /branches/experimental/parser/tools/lexer_generator/code_generator.jinja Wed Nov 20 13:56:20 2013 UTC
+++ /branches/experimental/parser/tools/lexer_generator/code_generator.jinja Wed Nov 20 16:10:09 2013 UTC
@@ -1,6 +1,5 @@
 #include "lexer/even-more-experimental-scanner.h"

-{# TODO implement CLASS checks #}
 {%- macro do_key(key) -%}
   {%- for r in key -%}
     {%- if not loop.first %} || {% endif -%}
@@ -19,8 +18,15 @@
        (yych == 0 && cursor_ >= buffer_end_)
       {%- elif r[1] == 'zero' -%}
        (yych == 0 && cursor_ < buffer_end_)
+      {%- elif r[1] == 'whitespace' and encoding == 'utf16'-%}
+        unicode_cache_->IsWhiteSpace(yych)
+      {%- elif r[1] == 'letter' and encoding == 'utf16'-%}
+        {# FIXME: Add and use unicode_cache_->InNonAsciiLetter #}
+        (!(yych >= 'a' && yych <= 'z') && !(yych >= 'A' && yych <= 'Z') && unicode_cache_->IsLetter(yych))
+      {%- elif r[1] == 'identifier_part_not_letter' and encoding == 'utf16'-%}
+        unicode_cache_->IsIdentifierPartNotLetter(yych)
       {%- else -%}
-       false
+       false /* {{r[1]}} */
       {%- endif -%}
     {%- else -%}
       false
=======================================
--- /branches/experimental/parser/tools/lexer_generator/code_generator.py Wed Nov 20 13:56:20 2013 UTC
+++ /branches/experimental/parser/tools/lexer_generator/code_generator.py Wed Nov 20 16:10:09 2013 UTC
@@ -35,7 +35,7 @@

   def __init__(self,
                rule_processor,
-               char_type,
+               encoding = 'latin1',
                minimize_default = True,
                inline = True,
                switching = True,
@@ -52,7 +52,7 @@
     self.__log = log
     self.__inline = inline
     self.__switching = switching
-    self.__char_type = char_type
+    self.__encoding = encoding

   def __state_cmp(self, left, right):
     if left['original_node_number'] == self.__start_node_number:
@@ -242,9 +242,16 @@
       undefined = jinja2.StrictUndefined)
     template = template_env.get_template('code_generator.jinja')

+    if self.__encoding == 'latin1':
+      char_type = 'uint8_t'
+    elif self.__encoding == 'utf16':
+      char_type = 'uint16_t'
+    else:
+      raise Exception('Unsupported encoding %s' % encoding)
     return template.render(
       start_node_number = 0,
       debug_print = self.__debug_print,
       default_action = default_action,
       dfa_states = dfa_states,
-      char_type = self.__char_type)
+      encoding = self.__encoding,
+      char_type = char_type)
=======================================
--- /branches/experimental/parser/tools/lexer_generator/generator.py Wed Nov 20 13:56:20 2013 UTC
+++ /branches/experimental/parser/tools/lexer_generator/generator.py Wed Nov 20 16:10:09 2013 UTC
@@ -97,7 +97,7 @@
   parser.add_argument('--re', default='src/lexer/lexer_py.re')
   parser.add_argument('--input')
   parser.add_argument('--code')
-  parser.add_argument('--char-type')
+  parser.add_argument('--encoding', default='latin1')
   parser.add_argument('--no-minimize-default', action='store_true')
   parser.add_argument('--no-verify-default', action='store_true')
   parser.add_argument('--no-inline', action='store_true')
@@ -133,13 +133,9 @@
         print "wrote html to %s" % html_file

   code_file = args.code
-  char_type = args.char_type
-  if not char_type:
-    char_type = 'uint8_t'
-
   if code_file:
     code_generator = CodeGenerator(rule_processor,
-                                   char_type,
+                                   encoding = args.encoding,
                                    minimize_default = minimize_default,
                                    log = verbose,
                                    inline = not args.no_inline,
=======================================
--- /branches/experimental/parser/tools/lexer_generator/transition_key_test.py Tue Nov 19 18:47:08 2013 UTC
+++ /branches/experimental/parser/tools/lexer_generator/transition_key_test.py Wed Nov 20 16:10:09 2013 UTC
@@ -51,7 +51,7 @@
       ("1-2", "12", "ab"),
       ("a-zA-Z", "abyzABYZ" , "123"),
       ("a-zA-Z0g" , "abyzABYZ0" , "123"),
-      ("a-z:ws::lit:" , "abc" , "123"),
+      ("a-z:whitespace::letter:" , "abc" , "123"),
     ]
     classes = {}
     for (string, match, no_match) in data:
=======================================
--- /branches/experimental/parser/tools/lexer_generator/transition_keys.py Tue Nov 19 18:47:08 2013 UTC
+++ /branches/experimental/parser/tools/lexer_generator/transition_keys.py Wed Nov 20 16:10:09 2013 UTC
@@ -41,9 +41,10 @@
    # These are not real ranges; they just need to be separate from any real
     # ranges.
     'whitespace' : (256, 256),
-    'literal' : (257, 257),
-    'eos' : (258, 258),
-    'zero' : (259, 259),
+    'letter' : (257, 257),
+    'identifier_part_not_letter' : (258, 258),
+    'eos' : (259, 259),
+    'zero' : (260, 260),
   }
   __lower_bound = 1
   __upper_bound = max(__class_bounds.values(), key=lambda item: item[1])[1]
@@ -139,14 +140,8 @@
         TransitionKey.__process_graph(x, ranges, key_map)
     elif key == 'CHARACTER_CLASS':
       class_name = graph[1]
-      if class_name == 'ws':
-        ranges.append(TransitionKey.__class_bounds['whitespace'])
-      elif class_name == 'lit':
-        ranges.append(TransitionKey.__class_bounds['literal'])
-      elif class_name == 'eos':
-        ranges.append(TransitionKey.__class_bounds['eos'])
-      elif class_name == 'zero':
-        ranges.append(TransitionKey.__class_bounds['zero'])
+      if class_name in TransitionKey.__class_bounds.keys():
+        ranges.append(TransitionKey.__class_bounds[class_name])
       elif class_name in key_map:
         ranges += key_map[class_name].__ranges
       else:

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.

Reply via email to