Updated Branches:
  refs/heads/trunk f0c0224da -> d4f205157
cqlsh: update syntax for tab completions

Patch by Paul Cannon, reviewed by brandonwilliams for CASSANDRA-3757


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d4f20515
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d4f20515
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d4f20515

Branch: refs/heads/trunk
Commit: d4f2051578948448b35fa59d85bd70ae4d3673b0
Parents: f0c0224
Author: paul cannon <[email protected]>
Authored: Mon Jan 30 15:43:53 2012 -0600
Committer: Brandon Williams <[email protected]>
Committed: Mon Jan 30 15:43:53 2012 -0600

----------------------------------------------------------------------
 bin/cqlsh                     |   35 ++++---
 pylib/cqlshlib/cqlhandling.py |  206 ++++++++++++++++++++++++++++++------
 pylib/cqlshlib/pylexotron.py  |   39 +++++---
 3 files changed, 219 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d4f20515/bin/cqlsh
----------------------------------------------------------------------
diff --git a/bin/cqlsh b/bin/cqlsh
index 74d62f2..4e67fda 100755
--- a/bin/cqlsh
+++ b/bin/cqlsh
@@ -619,22 +619,22 @@ class Shell(cmd.Cmd):
         self.reset_statement()
         print

-    def onecmd(self, statement):
+    def onecmd(self, statementtext):
         """
         Returns true if the statement is complete and was handled (meaning it
         can be reset).
         """

         try:
-            statements, in_batch = cqlhandling.cql_split_statements(statement)
+            statements, in_batch = cqlhandling.cql_split_statements(statementtext)
         except pylexotron.LexingError, e:
             if self.show_line_nums:
                 self.printerr('Invalid syntax at char %d' % (e.charnum,))
             else:
                 self.printerr('Invalid syntax at line %d, char %d'
                               % (e.linenum, e.charnum))
-            statement = statement.split('\n')[e.linenum - 1]
-            self.printerr('  %s' % statement)
+            statementline = statementtext.split('\n')[e.linenum - 1]
+            self.printerr('  %s' % statementline)
             self.printerr(' %s^' % (' ' * e.charnum))
             return True

@@ -647,7 +647,7 @@ class Shell(cmd.Cmd):
             return
         for st in statements:
             try:
-                self.handle_statement(st)
+                self.handle_statement(st, statementtext)
             except Exception, e:
                 if self.debug:
                     import traceback
@@ -665,25 +665,26 @@ class Shell(cmd.Cmd):
             self.printerr('Incomplete statement at end of file')
             self.do_exit()

-    def handle_statement(self, tokens):
+    def handle_statement(self, tokens, srcstr):
         cmdword = tokens[0][1]
         if cmdword == '?':
             cmdword = 'help'
         custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
         if custom_handler:
-            parsed = cqlhandling.cql_whole_parse_tokens(tokens, startsymbol='cqlshCommand')
+            parsed = cqlhandling.cql_whole_parse_tokens(tokens, srcstr=srcstr,
+                                                        startsymbol='cqlshCommand')
             if parsed and not parsed.remainder:
                 # successful complete parse
                 return custom_handler(parsed)
             else:
-                return self.handle_parse_error(cmdword, tokens, parsed)
-        return self.perform_statement_as_tokens(tokens)
+                return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
+        return self.perform_statement(cqlhandling.cql_extract_orig(tokens, srcstr))

-    def handle_parse_error(self, cmdword, tokens, parsed):
+    def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
         if cmdword == 'select':
             # hey, maybe they know about some new syntax we don't. type
             # assumptions won't work, but maybe the query will.
-            return self.perform_statement_as_tokens(tokens)
+            return self.perform_statement(cqlhandling.cql_extract_orig(tokens, srcstr))
         if parsed:
             self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
         else:
@@ -703,7 +704,7 @@ class Shell(cmd.Cmd):
         number, it can be enclosed in quotes and expressed as a string literal.
         """
         ksname = parsed.get_binding('ksname')
-        if self.perform_statement_as_tokens(parsed.matched):
+        if self.perform_statement(parsed.extract_orig()):
            self.current_keyspace = cql_dequote(ksname)

     def do_select(self, parsed):
@@ -731,10 +732,7 @@ class Shell(cmd.Cmd):
             ksname = cql_dequote(ksname)
         cfname = cql_dequote(parsed.get_binding('selectsource'))
         decoder = self.determine_decoder_for(cfname, ksname=ksname)
-        self.perform_statement_as_tokens(parsed.matched, decoder=decoder)
-
-    def perform_statement_as_tokens(self, tokens, decoder=None):
-        return self.perform_statement(cqlhandling.cql_detokenize(tokens), decoder=decoder)
+        self.perform_statement(parsed.extract_orig(), decoder=decoder)

     def perform_statement(self, statement, decoder=None):
         if not statement:
@@ -804,6 +802,7 @@ class Shell(cmd.Cmd):
         formatted_data = [map(self.myformat_value, row, coltypes) for row in self.cursor]

         # determine column widths
+        colnames = map(str, colnames)
         widths = map(len, colnames)
         for fmtrow in formatted_data:
             for num, col in enumerate(fmtrow):
@@ -902,6 +901,10 @@ class Shell(cmd.Cmd):
             else:
                 optval = cql_escape(optval)
             notable_columns.append((option, optval))
+        for option, thriftname, _ in cqlhandling.columnfamily_map_options:
+            optmap = getattr(cfdef, thriftname or option)
+            for k, v in optmap.items():
+                notable_columns.append(('%s:%s' % (option, k), cql_escape(v)))
         out.write('\n)')
         if notable_columns:
             joiner = 'WITH'

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d4f20515/pylib/cqlshlib/cqlhandling.py
----------------------------------------------------------------------
diff --git a/pylib/cqlshlib/cqlhandling.py b/pylib/cqlshlib/cqlhandling.py
index 340d471..e45fb45 100644
--- a/pylib/cqlshlib/cqlhandling.py
+++ b/pylib/cqlshlib/cqlhandling.py
@@ -27,17 +27,43 @@ columnfamily_options = (
     # (CQL option name, Thrift option name (or None if same))
     ('comment', None),
     ('comparator', 'comparator_type'),
-    ('row_cache_provider', None),
-    ('key_cache_size', None),
-    ('row_cache_size', None),
     ('read_repair_chance', None),
     ('gc_grace_seconds', None),
    ('default_validation', 'default_validation_class'),
     ('min_compaction_threshold', None),
     ('max_compaction_threshold', None),
+    ('replicate_on_write', None),
+    ('compaction_strategy_class', 'compaction_strategy'),
+)
+
+obsolete_cf_options = (
+    ('key_cache_size', None),
+    ('row_cache_size', None),
     ('row_cache_save_period_in_seconds', None),
     ('key_cache_save_period_in_seconds', None),
-    ('replicate_on_write', None)
+    ('memtable_throughput_in_mb', None),
+    ('memtable_operations_in_millions', None),
+    ('memtable_flush_after_mins', None),
+    ('row_cache_provider', None),
+)
+
+all_columnfamily_options = columnfamily_options + obsolete_cf_options
+
+columnfamily_map_options = (
+    ('compaction_strategy_options', None,
+        ()),
+    ('compression_parameters', 'compression_options',
+        ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
+)
+
+available_compression_classes = (
+    'DeflateCompressor',
+    'SnappyCompressor',
+)
+
+available_compaction_classes = (
+    'LeveledCompactionStrategy',
+    'SizeTieredCompactionStrategy'
 )

 cql_type_to_apache_class = {
@@ -88,8 +114,9 @@ def is_valid_cql_word(s):
 def tokenize_cql(cql_text):
     return CqlLexotron.scan(cql_text)[0]

-def cql_detokenize(toklist):
-    return ' '.join([t[1] for t in toklist])
+def cql_extract_orig(toklist, srcstr):
+    # low end of span for first token, to high end of span for last token
+    return srcstr[toklist[0][2][0]:toklist[-1][2][1]]

 # note: commands_end_with_newline may be extended by an importing module.
 commands_end_with_newline = set()
@@ -185,7 +212,8 @@ JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
 <float> ::= /-?[0-9]+\.[0-9]+/ ;
 <integer> ::= /-?[0-9]+/ ;
 <uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
-<identifier> ::= /[a-z][a-z0-9_:]*/ ;
+<identifier> ::= /[a-z][a-z0-9_]*/ ;
+<colon> ::= ":" ;
 <star> ::= "*" ;
 <range> ::= ".." ;
 <endtoken> ::= ";" ;
@@ -319,7 +347,7 @@ explain_completion('whatToSelect', 'rangestart', '<range_start>')
 explain_completion('whatToSelect', 'rangeend', '<range_end>')

 syntax_rules += r'''
-<insertStatement> ::= "INSERT" "INTO" insertcf=<name>
+<insertStatement> ::= "INSERT" "INTO" ( insertks=<name> "." )? insertcf=<name>
                                "(" keyname=<colname> ","
                                    [colname]=<colname> ( "," [colname]=<colname> )* ")"
                       "VALUES" "(" <term> "," <term> ( "," <term> )* ")"
@@ -332,9 +360,22 @@ syntax_rules += r'''
     ;
 '''

+@completer_for('insertStatement', 'insertks')
+def insert_ks_completer(ctxt, cass):
+    return [maybe_cql_escape(ks) + '.' for ks in cass.get_keyspace_names()]
+
 @completer_for('insertStatement', 'insertcf')
 def insert_cf_completer(ctxt, cass):
-    return map(maybe_cql_escape, cass.get_columnfamily_names())
+    ks = ctxt.get_binding('insertks', None)
+    if ks is not None:
+        ks = cql_dequote(ks)
+    try:
+        cfnames = cass.get_columnfamily_names(ks)
+    except Exception:
+        if ks is None:
+            return ()
+        raise
+    return map(maybe_cql_escape, cfnames)

 @completer_for('insertStatement', 'keyname')
 def insert_keyname_completer(ctxt, cass):
@@ -352,7 +393,7 @@ def insert_option_completer(ctxt, cass):
     return opts

 syntax_rules += r'''
-<updateStatement> ::= "UPDATE" cf=<name>
+<updateStatement> ::= "UPDATE" ( updateks=<name> "." )? updatecf=<name>
                         ( "USING" [updateopt]=<usingOption>
                                   ( "AND" [updateopt]=<usingOption> )* )?
                         "SET" <assignment> ( "," <assignment> )*
@@ -366,9 +407,22 @@ syntax_rules += r'''
     ;
 '''

-@completer_for('updateStatement', 'cf')
+@completer_for('updateStatement', 'updateks')
 def update_cf_completer(ctxt, cass):
-    return map(maybe_cql_escape, cass.get_columnfamily_names())
+    return [maybe_cql_escape(ks) + '.' for ks in cass.get_keyspace_names()]
+
+@completer_for('updateStatement', 'updatecf')
+def update_cf_completer(ctxt, cass):
+    ks = ctxt.get_binding('updateks', None)
+    if ks is not None:
+        ks = cql_dequote(ks)
+    try:
+        cfnames = cass.get_columnfamily_names(ks)
+    except Exception:
+        if ks is None:
+            return ()
+        raise
+    return map(maybe_cql_escape, cfnames)

 @completer_for('updateStatement', 'updateopt')
 def insert_option_completer(ctxt, cass):
@@ -413,7 +467,7 @@ def update_filter_in_completer(ctxt, cass):

 syntax_rules += r'''
 <deleteStatement> ::= "DELETE" ( [delcol]=<colname> ( "," [delcol]=<colname> )* )?
-                        "FROM" cf=<name>
+                        "FROM" ( deleteks=<name> "." )? deletecf=<name>
                         ( "USING" [delopt]=<deleteOption> ( "AND" [delopt]=<deleteOption> )* )?
                         "WHERE" <updateWhereClause>
     ;
@@ -422,9 +476,22 @@ syntax_rules += r'''
     ;
 '''

-@completer_for('deleteStatement', 'cf')
+@completer_for('deleteStatement', 'deleteks')
+def update_cf_completer(ctxt, cass):
+    return [maybe_cql_escape(ks) + '.' for ks in cass.get_keyspace_names()]
+
+@completer_for('deleteStatement', 'deletecf')
 def delete_cf_completer(ctxt, cass):
-    return map(maybe_cql_escape, cass.get_columnfamily_names())
+    ks = ctxt.get_binding('deleteks', None)
+    if ks is not None:
+        ks = cql_dequote(ks)
+    try:
+        cfnames = cass.get_columnfamily_names(ks)
+    except Exception:
+        if ks is None:
+            return ()
+        raise
+    return map(maybe_cql_escape, cfnames)

 @completer_for('deleteStatement', 'delopt')
 def delete_opt_completer(ctxt, cass):
@@ -457,13 +524,26 @@ def batch_opt_completer(ctxt, cass):
     return opts

 syntax_rules += r'''
-<truncateStatement> ::= "TRUNCATE" cf=<name>
+<truncateStatement> ::= "TRUNCATE" ( truncateks=<name> "." )? truncatecf=<name>
     ;
 '''

-@completer_for('truncateStatement', 'cf')
+@completer_for('truncateStatement', 'truncateks')
+def update_cf_completer(ctxt, cass):
+    return [maybe_cql_escape(ks) + '.' for ks in cass.get_keyspace_names()]
+
+@completer_for('truncateStatement', 'truncatecf')
 def truncate_cf_completer(ctxt, cass):
-    return map(maybe_cql_escape, cass.get_columnfamily_names())
+    ks = ctxt.get_binding('truncateks', None)
+    if ks is not None:
+        ks = cql_dequote(ks)
+    try:
+        cfnames = cass.get_columnfamily_names(ks)
+    except Exception:
+        if ks is None:
+            return ()
+        raise
+    return map(maybe_cql_escape, cfnames)

 syntax_rules += r'''
 <createKeyspaceStatement> ::= "CREATE" "KEYSPACE" ksname=<name>
@@ -504,11 +584,14 @@ syntax_rules += r'''
 <createColumnFamilyStatement> ::= "CREATE" "COLUMNFAMILY" cf=<name>
                                     "(" keyalias=<colname> <storageType> "PRIMARY" "KEY"
                                         ( "," colname=<colname> <storageType> )* ")"
-                                   ( "WITH" [cfopt]=<identifier> "=" [optval]=<cfOptionVal>
-                                     ( "AND" [cfopt]=<identifier> "=" [optval]=<cfOptionVal> )* )?
+                                   ( "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
+                                     ( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )* )?
     ;
-<cfOptionVal> ::= <storageType>
-                | <identifier>
+
+<cfOptionName> ::= cfoptname=<identifier> ( cfoptsep=":" cfsubopt=( <identifier> | <integer> ) )?
+    ;
+
+<cfOptionVal> ::= <identifier>
                 | <stringLiteral>
                 | <integer>
                 | <float>
@@ -518,11 +601,67 @@ syntax_rules += r'''
 explain_completion('createColumnFamilyStatement', 'keyalias', '<new_key_alias>')
 explain_completion('createColumnFamilyStatement', 'cf', '<new_columnfamily_name>')
 explain_completion('createColumnFamilyStatement', 'colname', '<new_column_name>')
-explain_completion('createColumnFamilyStatement', 'optval', '<option_value>')

-@completer_for('createColumnFamilyStatement', 'cfopt')
+@completer_for('cfOptionName', 'cfoptname')
 def create_cf_option_completer(ctxt, cass):
-    return [c[0] for c in columnfamily_options]
+    return [c[0] for c in columnfamily_options] + \
+           [c[0] + ':' for c in columnfamily_map_options]
+
+@completer_for('cfOptionName', 'cfoptsep')
+def create_cf_suboption_separator(ctxt, cass):
+    opt = ctxt.get_binding('cfoptname')
+    if any(opt == c[0] for c in columnfamily_map_options):
+        return [':']
+    return ()
+
+@completer_for('cfOptionName', 'cfsubopt')
+def create_cf_suboption_completer(ctxt, cass):
+    opt = ctxt.get_binding('cfoptname')
+    if opt == 'compaction_strategy_options':
+        # try to determine the strategy class in use
+        prevopts = ctxt.get_binding('cfopt', ())
+        prevvals = ctxt.get_binding('optval', ())
+        for prevopt, prevval in zip(prevopts, prevvals):
+            if prevopt == 'compaction_strategy_class':
+                csc = cql_dequote(prevval)
+                break
+        else:
+            cf = ctxt.get_binding('cf')
+            try:
+                csc = cass.get_columnfamily(cf).compaction_strategy
+            except Exception:
+                csc = ''
+        csc = csc.split('.')[-1]
+        if csc == 'SizeTieredCompactionStrategy':
+            return ['min_sstable_size']
+        elif csc == 'LeveledCompactionStrategy':
+            return ['sstable_size_in_mb']
+    for optname, _, subopts in columnfamily_map_options:
+        if opt == optname:
+            return subopts
+    return ()
+
+def create_cf_option_val_completer(ctxt, cass):
+    exist_opts = ctxt.get_binding('cfopt')
+    this_opt = exist_opts[-1]
+    if this_opt == 'compression_parameters:sstable_compression':
+        return map(cql_escape, available_compression_classes)
+    if this_opt == 'compaction_strategy_class':
+        return map(cql_escape, available_compaction_classes)
+    if any(this_opt == opt[0] for opt in obsolete_cf_options):
+        return ["'<obsolete_option>'"]
+    if this_opt in ('comparator', 'default_validation'):
+        return cql_types
+    if this_opt == 'read_repair_chance':
+        return [Hint('<float_between_0_and_1>')]
+    if this_opt == 'replicate_on_write':
+        return [Hint('<yes_or_no>')]
+    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold', 'gc_grace_seconds'):
+        return [Hint('<integer>')]
+    return [Hint('<option_value>')]
+
+completer_for('createColumnFamilyStatement', 'optval') \
+             (create_cf_option_val_completer)

 syntax_rules += r'''
 <createIndexStatement> ::= "CREATE" "INDEX" indexname=<identifier>? "ON"
@@ -575,6 +714,8 @@ syntax_rules += r'''
 <alterInstructions> ::= "ALTER" existcol=<name> "TYPE" <storageType>
                       | "ADD" newcol=<name> <storageType>
                       | "DROP" existcol=<name>
+                      | "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
+                        ( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )*
     ;
 '''

@@ -589,6 +730,9 @@ def alter_table_col_completer(ctxt, cass):

 explain_completion('alterInstructions', 'newcol', '<new_column_name>')

+completer_for('alterInstructions', 'optval') \
+             (create_cf_option_val_completer)
+
 # END SYNTAX/COMPLETION RULE DEFINITIONS


@@ -608,13 +752,10 @@ def cql_add_completer(rulename, symname):
 def cql_parse(text, startsymbol='Start'):
     tokens = CqlRuleSet.lex(text)
     tokens = cql_massage_tokens(tokens)
-    return cql_parse_tokens(tokens, startsymbol)
-
-def cql_parse_tokens(toklist, startsymbol='Start'):
-    return CqlRuleSet.parse(startsymbol, toklist)
+    return CqlRuleSet.parse(startsymbol, tokens, init_bindings={'*SRC*': text})

-def cql_whole_parse_tokens(toklist, startsymbol='Start'):
-    return CqlRuleSet.whole_match(startsymbol, toklist)
+def cql_whole_parse_tokens(toklist, srcstr=None, startsymbol='Start'):
+    return CqlRuleSet.whole_match(startsymbol, toklist, srcstr=srcstr)

 def cql_massage_tokens(toklist):
     curstmt = []
@@ -625,7 +766,7 @@ def cql_massage_tokens(toklist):
     for t in toklist:
         if t[0] == 'endline':
             if term_on_nl:
-                t = ('endtoken', '\n')
+                t = ('endtoken',) + t[1:]
             else:
                 # don't put any 'endline' tokens in output
                 continue
@@ -716,6 +857,7 @@ def cql_complete_single(text, partial, init_bindings={}, ignore_case=True, start
     if tokens and tokens[-1][0] == 'unclosedComment':
         return []
     bindings['partial'] = partial
+    bindings['*SRC*'] = text

     # find completions for the position
     completions = CqlRuleSet.complete(startsymbol, tokens, bindings)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d4f20515/pylib/cqlshlib/pylexotron.py
----------------------------------------------------------------------
diff --git a/pylib/cqlshlib/pylexotron.py b/pylib/cqlshlib/pylexotron.py
index ff099ff..7482aba 100644
--- a/pylib/cqlshlib/pylexotron.py
+++ b/pylib/cqlshlib/pylexotron.py
@@ -88,6 +88,18 @@ class ParseContext:
         return self.__class__(self.ruleset, self.bindings, self.matched,
                               self.remainder, newname)

+    def extract_orig(self, tokens=None):
+        if tokens is None:
+            tokens = self.matched
+        if not tokens:
+            return ''
+        orig = self.bindings.get('*SRC*', None)
+        if orig is None:
+            # pretty much just guess
+            return ' '.join([t[1] for t in tokens])
+        # low end of span for first token, to high end of span for last token
+        return orig[tokens[0][2][0]:tokens[-1][2][1]]
+
     def __repr__(self):
         return '<%s matched=%r remainder=%r prodname=%r bindings=%r>' \
                % (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
@@ -183,7 +195,7 @@ class named_symbol(matcher):
             # don't collect other completions under this; use a dummy
             pass_in_compls = set()
         results = self.arg.match_with_results(ctxt, pass_in_compls)
-        return [c.with_binding(self.name, tokens_to_text(matchtoks)) for (c, matchtoks) in results]
+        return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]

     def __repr__(self):
         return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
@@ -197,7 +209,7 @@ class named_collector(named_symbol):
         output = []
         for ctxt, matchtoks in self.arg.match_with_results(ctxt, pass_in_compls):
             oldval = ctxt.get_binding(self.name, ())
-            output.append(ctxt.with_binding(self.name, oldval + (tokens_to_text(matchtoks),)))
+            output.append(ctxt.with_binding(self.name, oldval + (ctxt.extract_orig(matchtoks),)))
         return output

 class terminal_matcher(matcher):
@@ -257,9 +269,6 @@ class case_match(text_match):
     def pattern(self):
         return re.escape(self.arg)

-def tokens_to_text(toks):
-    return ' '.join([t[1] for t in toks])
-
 class ParsingRuleSet:
     RuleSpecScanner = SaferScanner([
         (r'::=', lambda s,t: t),
@@ -381,7 +390,7 @@ class ParsingRuleSet:
         def make_handler(name):
             if name == 'JUNK':
                 return None
-            return lambda s, t: (name, t)
+            return lambda s, t: (name, t, s.match.span())
         regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
         return SaferScanner(regexes, re.I | re.S).scan

@@ -400,13 +409,16 @@ class ParsingRuleSet:
         pattern = self.ruleset[startsymbol]
         return pattern.match(ctxt, None)

-    def whole_match(self, startsymbol, tokens):
-        newctxts = [c for c in self.parse(startsymbol, tokens) if not c.remainder]
-        if newctxts:
-            return newctxts[0]
+    def whole_match(self, startsymbol, tokens, srcstr=None):
+        bindings = {}
+        if srcstr is not None:
+            bindings['*SRC*'] = srcstr
+        for c in self.parse(startsymbol, tokens, init_bindings=bindings):
+            if not c.remainder:
+                return c

     def lex_and_parse(self, text, startsymbol='Start'):
-        return self.parse(startsymbol, self.lex(text))
+        return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})

     def complete(self, startsymbol, tokens, init_bindings=None):
         if init_bindings is None:
@@ -442,11 +454,12 @@ class Debugotron(set):
             lineno = frame.f_lineno
             if 'self' in frame.f_locals:
                 clsobj = frame.f_locals['self']
-                cls = clsobj.__class__
                 line = '%s.%s() (%s:%d)' % (clsobj, name, filename, lineno)
             else:
                 line = '%s (%s:%d)' % (name, filename, lineno)
-            self.stream.write('  %s\n' % (line,))
+            self.stream.write('  - %s\n' % (line,))
+            if i == 0 and 'ctxt' in frame.f_locals:
+                self.stream.write('    - %s\n' % (frame.f_locals['ctxt'],))
             frame = frame.f_back

     def update(self, items):
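
The mechanism running through all three files: lexer tokens now carry their source span as a third element (make_handler() returns (name, t, s.match.span())), and cql_extract_orig() / ParseContext.extract_orig() slice the original statement out of the '*SRC*' binding instead of re-joining token text with spaces, as cql_detokenize() and tokens_to_text() used to do. A minimal self-contained sketch of that idea follows; it is illustrative only, and the toy_tokenize() helper plus the sample statement are stand-ins invented here, not part of this commit or of cqlshlib's real lexer.

import re

def toy_tokenize(src):
    # Each token records the (start, end) span it was matched at, mirroring
    # the new make_handler() lambda: (name, t, s.match.span()).
    return [('tok', m.group(0), m.span())
            for m in re.finditer(r"'[^']*'|[A-Za-z_][A-Za-z0-9_]*|\S", src)]

def extract_orig(toklist, srcstr):
    # low end of span for first token, to high end of span for last token,
    # the same slice cql_extract_orig() takes against the '*SRC*' text
    return srcstr[toklist[0][2][0]:toklist[-1][2][1]]

src = "SELECT  'First Name'   FROM users;"
toks = toy_tokenize(src)
print(' '.join(t[1] for t in toks))   # old detokenize style: spacing is normalized
print(extract_orig(toks, src))        # new style: the statement comes back verbatim

Joining token text with spaces rewrites whatever spacing the user typed before the statement is sent to the server; slicing by span hands it over exactly as entered. The same property matters for bindings that span several tokens: with the new <cfOptionName> rule, a [cfopt] binding such as compression_parameters:sstable_compression comes back without injected spaces, which is the form create_cf_option_val_completer() compares against.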
