Replace INTO N BUCKETS with PARTITIONS N in CREATE TABLE

This commit also removes the `DISTRIBUTE`, `SPLIT`, and `BUCKETS` keywords
that were going to be newly released in Impala 2.6, but are now unused.
Additionally, a few remaining uses of the `DISTRIBUTE BY` syntax have been
switched to `PARTITION BY`.

Change-Id: I32fdd5ef26c532f7a30220db52bdfbf228165922
Reviewed-on: http://gerrit.cloudera.org:8080/5382
Reviewed-by: Matthew Jacobs <[email protected]>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/f83652c1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/f83652c1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/f83652c1

Branch: refs/heads/master
Commit: f83652c1da61c3996bd8c812dd57d2dd00e8ce3c
Parents: 6098ac7
Author: Dan Burkert <[email protected]>
Authored: Mon Dec 5 13:44:41 2016 -0800
Committer: Internal Jenkins <[email protected]>
Committed: Wed Dec 7 07:31:16 2016 +0000

----------------------------------------------------------------------
 common/thrift/CatalogObjects.thrift             |  2 +-
 docs/topics/impala_create_table.xml             |  8 +-
 docs/topics/impala_kudu.xml                     | 15 ++-
 docs/topics/impala_partitioning.xml             |  2 +-
 docs/topics/impala_reserved_words.xml           |  3 -
 fe/src/main/cup/sql-parser.cup                  | 21 ++---
 .../impala/analysis/KuduPartitionParam.java     | 27 +++---
 .../org/apache/impala/analysis/TableDef.java    |  2 +-
 .../org/apache/impala/catalog/KuduTable.java    |  2 +-
 .../impala/service/KuduCatalogOpExecutor.java   |  2 +-
 fe/src/main/jflex/sql-scanner.flex              |  3 -
 .../apache/impala/analysis/AnalyzeDDLTest.java  | 98 ++++++++++----------
 .../impala/analysis/AuthorizationTest.java      |  2 +-
 .../org/apache/impala/analysis/ParserTest.java  | 32 +++----
 testdata/bin/generate-schema-statements.py      |  2 +-
 .../functional/functional_schema_template.sql   | 16 ++--
 testdata/datasets/tpcds/tpcds_kudu_template.sql | 48 +++++-----
 testdata/datasets/tpch/tpch_kudu_template.sql   | 16 ++--
 testdata/datasets/tpch/tpch_schema_template.sql | 16 ++--
 .../queries/PlannerTest/lineage.test            |  4 +-
 .../queries/QueryTest/kudu-scan-node.test       |  6 +-
 .../QueryTest/kudu-timeouts-catalogd.test       |  2 +-
 .../queries/QueryTest/kudu_alter.test           |  4 +-
 .../queries/QueryTest/kudu_create.test          | 18 ++--
 .../queries/QueryTest/kudu_delete.test          |  4 +-
 .../queries/QueryTest/kudu_describe.test        |  2 +-
 .../queries/QueryTest/kudu_insert.test          |  8 +-
 .../queries/QueryTest/kudu_partition_ddl.test   | 10 +-
 .../queries/QueryTest/kudu_upsert.test          |  2 +-
 tests/comparison/db_connection.py               |  2 +-
 tests/comparison/tests/test_cursor.py           |  4 +-
 tests/query_test/test_cancellation.py           |  2 +-
 tests/query_test/test_kudu.py                   | 36 +++----
 tests/shell/test_shell_commandline.py           |  2 +-
 34 files changed, 204 insertions(+), 219 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/common/thrift/CatalogObjects.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/CatalogObjects.thrift 
b/common/thrift/CatalogObjects.thrift
index de89b51..f7ce00b 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -360,7 +360,7 @@ struct TDataSourceTable {
 // Parameters needed for hash partitioning
 struct TKuduPartitionByHashParam {
   1: required list<string> columns
-  2: required i32 num_buckets
+  2: required i32 num_partitions
 }
 
 struct TRangePartition {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/docs/topics/impala_create_table.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_create_table.xml 
b/docs/topics/impala_create_table.xml
index ef6ac39..bd2e12c 100644
--- a/docs/topics/impala_create_table.xml
+++ b/docs/topics/impala_create_table.xml
@@ -279,18 +279,18 @@ file_format:
 
 <!--
     <p rev="kudu">
-      <b>Partitioning for Kudu tables (DISTRIBUTE BY clause)</b>
+      <b>Partitioning for Kudu tables (PARTITION BY clause)</b>
     </p>
 
     <p rev="kudu">
       For Kudu tables, you specify logical partitioning across one or more 
columns using the
-      <codeph>DISTRIBUTE BY</codeph> clause. In contrast to partitioning for 
HDFS-based tables,
+      <codeph>PARTITION BY</codeph> clause. In contrast to partitioning for 
HDFS-based tables,
       multiple values for a partition key column can be located in the same 
partition.
       The optional <codeph>HASH</codeph> clause lets you divide one or a set 
of partition key columns
-      into a specified number of buckets; you can use more than one 
<codeph>HASH</codeph>
+      into a specified number of partitions; you can use more than one 
<codeph>HASH</codeph>
       clause, specifying a distinct set of partition key columns for each.
       The optional <codeph>RANGE</codeph> clause further subdivides the 
partitions, based on
-      a set of literal values for the partition key columns.
+      a range of values for the partition key columns.
     </p>
 -->
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/docs/topics/impala_kudu.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_kudu.xml b/docs/topics/impala_kudu.xml
index 5d909b2..186336b 100644
--- a/docs/topics/impala_kudu.xml
+++ b/docs/topics/impala_kudu.xml
@@ -147,14 +147,13 @@ under the License.
         Kudu tables use special mechanisms to evenly distribute data among the 
underlying tablet servers. Although
         we refer to such tables as partitioned tables, they are distinguished 
from traditional Impala partitioned
         tables by use of different clauses on the <codeph>CREATE 
TABLE</codeph> statement. Partitioned Kudu tables
-        use <codeph>DISTRIBUTE BY</codeph>, <codeph>HASH</codeph>, 
<codeph>RANGE</codeph>, and <codeph>SPLIT
-        ROWS</codeph> clauses rather than the traditional <codeph>PARTITIONED 
BY</codeph> clause. All of the
-        columns involved in these clauses must be primary key columns. These 
clauses let you specify different ways
-        to divide the data for each column, or even for different value ranges 
within a column. This flexibility
-        lets you avoid problems with uneven distribution of data, where the 
partitioning scheme for HDFS tables
-        might result in some partitions being much larger than others. By 
setting up an effective partitioning
-        scheme for a Kudu table, you can ensure that the work for a query can 
be parallelized evenly across the
-        hosts in a cluster.
+        use <codeph>PARTITION BY</codeph>, <codeph>HASH</codeph>, and 
<codeph>RANGE</codeph> clauses rather than
+        the traditional <codeph>PARTITIONED BY</codeph> clause. All of the 
columns involved in these clauses must
+        be primary key columns. These clauses let you specify different ways 
to divide the data for each column,
+        or even for different value ranges within a column. This flexibility 
lets you avoid problems with uneven
+        distribution of data, where the partitioning scheme for HDFS tables 
might result in some partitions being
+        much larger than others. By setting up an effective partitioning 
scheme for a Kudu table, you can ensure
+        that the work for a query can be parallelized evenly across the hosts 
in a cluster.
       </p>
 
     </conbody>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/docs/topics/impala_partitioning.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_partitioning.xml 
b/docs/topics/impala_partitioning.xml
index ae7310c..31fdcfc 100644
--- a/docs/topics/impala_partitioning.xml
+++ b/docs/topics/impala_partitioning.xml
@@ -588,7 +588,7 @@ SELECT COUNT(*) FROM sales_table WHERE year IN (2005, 2010, 
2015);
     <conbody>
 
       <p>
-        Kudu tables use a more fine-grained partitioning scheme than tables 
containing HDFS data files. You specify a <codeph>DISTRIBUTE
+        Kudu tables use a more fine-grained partitioning scheme than tables 
containing HDFS data files. You specify a <codeph>PARTITION
         BY</codeph> clause with the <codeph>CREATE TABLE</codeph> statement to 
identify how to divide the values from the partition key
         columns.
       </p>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/docs/topics/impala_reserved_words.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_reserved_words.xml 
b/docs/topics/impala_reserved_words.xml
index 46ec4a8..080735a 100644
--- a/docs/topics/impala_reserved_words.xml
+++ b/docs/topics/impala_reserved_words.xml
@@ -83,7 +83,6 @@ between
 bigint
 <ph rev="1.4.0">binary</ph>
 boolean
-<ph rev="2.6.0">buckets</ph>
 by
 <ph rev="1.4.0">cached</ph>
 <ph rev="2.3.0">cascade</ph>
@@ -111,7 +110,6 @@ delimited
 desc
 describe
 distinct
-<ph rev="2.6.0">distribute</ph>
 div
 double
 drop
@@ -212,7 +210,6 @@ serdeproperties
 set
 show
 smallint
-<ph rev="2.6.0">split</ph>
 stats
 stored
 straight_join

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index e09993b..b66483b 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -245,11 +245,11 @@ parser code {:
 terminal
   KW_ADD, KW_AGGREGATE, KW_ALL, KW_ALTER, KW_ANALYTIC, KW_AND, KW_ANTI, 
KW_API_VERSION,
   KW_ARRAY, KW_AS, KW_ASC, KW_AVRO, KW_BETWEEN, KW_BIGINT, KW_BINARY, 
KW_BLOCKSIZE,
-  KW_BOOLEAN, KW_BUCKETS, KW_BY, KW_CACHED, KW_CASCADE, KW_CASE, KW_CAST, 
KW_CHANGE,
+  KW_BOOLEAN, KW_BY, KW_CACHED, KW_CASCADE, KW_CASE, KW_CAST, KW_CHANGE,
   KW_CHAR, KW_CLASS, KW_CLOSE_FN, KW_COLUMN, KW_COLUMNS, KW_COMMENT, 
KW_COMPRESSION,
   KW_COMPUTE, KW_CREATE, KW_CROSS, KW_CURRENT, KW_DATA, KW_DATABASE, 
KW_DATABASES,
   KW_DATE, KW_DATETIME, KW_DECIMAL, KW_DEFAULT, KW_DELETE, KW_DELIMITED, 
KW_DESC,
-  KW_DESCRIBE, KW_DISTINCT, KW_DISTRIBUTE, KW_DIV, KW_DOUBLE, KW_DROP, KW_ELSE,
+  KW_DESCRIBE, KW_DISTINCT, KW_DIV, KW_DOUBLE, KW_DROP, KW_ELSE,
   KW_ENCODING, KW_END, KW_ESCAPED, KW_EXISTS, KW_EXPLAIN, KW_EXTENDED, 
KW_EXTERNAL,
   KW_FALSE, KW_FIELDS, KW_FILEFORMAT, KW_FILES, KW_FINALIZE_FN, KW_FIRST, 
KW_FLOAT,
   KW_FOLLOWING, KW_FOR, KW_FORMAT, KW_FORMATTED, KW_FROM, KW_FULL, KW_FUNCTION,
@@ -263,7 +263,7 @@ terminal
   KW_PURGE, KW_RANGE, KW_RCFILE, KW_RECOVER, KW_REFRESH, KW_REGEXP, KW_RENAME, 
KW_REPLACE,
   KW_REPLICATION, KW_RESTRICT, KW_RETURNS, KW_REVOKE, KW_RIGHT, KW_RLIKE, 
KW_ROLE,
   KW_ROLES, KW_ROW, KW_ROWS, KW_SCHEMA, KW_SCHEMAS, KW_SELECT, KW_SEMI, 
KW_SEQUENCEFILE,
-  KW_SERDEPROPERTIES, KW_SERIALIZE_FN, KW_SET, KW_SHOW, KW_SMALLINT, KW_SPLIT, 
KW_STORED,
+  KW_SERDEPROPERTIES, KW_SERIALIZE_FN, KW_SET, KW_SHOW, KW_SMALLINT, KW_STORED,
   KW_STRAIGHT_JOIN, KW_STRING, KW_STRUCT, KW_SYMBOL, KW_TABLE, KW_TABLES,
   KW_TBLPROPERTIES, KW_TERMINATED, KW_TEXTFILE, KW_THEN, KW_TIMESTAMP, 
KW_TINYINT,
   KW_TRUNCATE, KW_STATS, KW_TO, KW_TRUE, KW_UNBOUNDED, KW_UNCACHED, KW_UNION, 
KW_UPDATE,
@@ -1191,13 +1191,12 @@ hash_partition_param_list ::=
 
 // The column list for a HASH clause is optional.
 hash_partition_param ::=
-  KW_HASH LPAREN ident_list:cols RPAREN KW_INTO
-    INTEGER_LITERAL:buckets KW_BUCKETS
-  {: RESULT = KuduPartitionParam.createHashParam(cols, buckets.intValue()); :}
-  | KW_HASH KW_INTO INTEGER_LITERAL:buckets KW_BUCKETS
+  KW_HASH LPAREN ident_list:cols RPAREN KW_PARTITIONS 
INTEGER_LITERAL:numPartitions
+  {: RESULT = KuduPartitionParam.createHashParam(cols, 
numPartitions.intValue()); :}
+  | KW_HASH KW_PARTITIONS INTEGER_LITERAL:numPartitions
   {:
     RESULT = KuduPartitionParam.createHashParam(Lists.<String>newArrayList(),
-        buckets.intValue());
+        numPartitions.intValue());
   :}
   ;
 
@@ -3061,8 +3060,6 @@ ident_or_keyword ::=
   {: RESULT = r.toString(); :}
   | KW_BOOLEAN:r
   {: RESULT = r.toString(); :}
-  | KW_BUCKETS:r
-  {: RESULT = r.toString(); :}
   | KW_BY:r
   {: RESULT = r.toString(); :}
   | KW_CACHED:r
@@ -3121,8 +3118,6 @@ ident_or_keyword ::=
   {: RESULT = r.toString(); :}
   | KW_DISTINCT:r
   {: RESULT = r.toString(); :}
-  | KW_DISTRIBUTE:r
-  {: RESULT = r.toString(); :}
   | KW_DIV:r
   {: RESULT = r.toString(); :}
   | KW_DOUBLE:r
@@ -3333,8 +3328,6 @@ ident_or_keyword ::=
   {: RESULT = r.toString(); :}
   | KW_SMALLINT:r
   {: RESULT = r.toString(); :}
-  | KW_SPLIT:r
-  {: RESULT = r.toString(); :}
   | KW_STORED:r
   {: RESULT = r.toString(); :}
   | KW_STRAIGHT_JOIN:r

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/java/org/apache/impala/analysis/KuduPartitionParam.java
----------------------------------------------------------------------
diff --git 
a/fe/src/main/java/org/apache/impala/analysis/KuduPartitionParam.java 
b/fe/src/main/java/org/apache/impala/analysis/KuduPartitionParam.java
index 3f69fae..5cfb2fc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/KuduPartitionParam.java
+++ b/fe/src/main/java/org/apache/impala/analysis/KuduPartitionParam.java
@@ -37,7 +37,7 @@ import com.google.common.collect.Lists;
  *
  * Examples:
  * - Hash-based:
- *   PARTITION BY HASH(id) INTO 10 BUCKETS
+ *   PARTITION BY HASH(id) PARTITIONS 10
  * - Single column range-based:
  *   PARTITION BY RANGE(age)
  *   (
@@ -47,7 +47,7 @@ import com.google.common.collect.Lists;
  *     PARTITION VALUE = 100
  *   )
  * - Combination of hash and range based:
- *   PARTITION BY HASH (id) INTO 3 BUCKETS,
+ *   PARTITION BY HASH (id) PARTITIONS 3,
  *   RANGE (age)
  *   (
  *     PARTITION 10 <= VALUES < 20,
@@ -67,8 +67,8 @@ public class KuduPartitionParam implements ParseNode {
   /**
    * Creates a hash-based KuduPartitionParam.
    */
-  public static KuduPartitionParam createHashParam(List<String> cols, int 
buckets) {
-    return new KuduPartitionParam(Type.HASH, cols, buckets, null);
+  public static KuduPartitionParam createHashParam(List<String> cols, int 
numPartitions) {
+    return new KuduPartitionParam(Type.HASH, cols, numPartitions, null);
   }
 
   /**
@@ -76,10 +76,10 @@ public class KuduPartitionParam implements ParseNode {
    */
   public static KuduPartitionParam createRangeParam(List<String> cols,
       List<RangePartition> rangePartitions) {
-    return new KuduPartitionParam(Type.RANGE, cols, NO_BUCKETS, 
rangePartitions);
+    return new KuduPartitionParam(Type.RANGE, cols, NO_HASH_PARTITIONS, 
rangePartitions);
   }
 
-  private static final int NO_BUCKETS = -1;
+  private static final int NO_HASH_PARTITIONS = -1;
 
   /**
    * The partitioning type.
@@ -100,17 +100,17 @@ public class KuduPartitionParam implements ParseNode {
   private final Type type_;
 
   // Only relevant for hash-based partitioning, -1 otherwise
-  private final int numBuckets_;
+  private final int numHashPartitions_;
 
   // List of range partitions specified in a range-based partitioning.
   private List<RangePartition> rangePartitions_;
 
-  private KuduPartitionParam(Type t, List<String> colNames, int buckets,
+  private KuduPartitionParam(Type t, List<String> colNames, int 
numHashPartitions,
       List<RangePartition> partitions) {
     type_ = t;
     for (String name: colNames) colNames_.add(name.toLowerCase());
     rangePartitions_ = partitions;
-    numBuckets_ = buckets;
+    numHashPartitions_ = numHashPartitions;
   }
 
   @Override
@@ -149,9 +149,8 @@ public class KuduPartitionParam implements ParseNode {
       Joiner.on(", ").appendTo(builder, colNames_).append(")");
     }
     if (type_ == Type.HASH) {
-      builder.append(" INTO ");
-      Preconditions.checkState(numBuckets_ != NO_BUCKETS);
-      builder.append(numBuckets_).append(" BUCKETS");
+      Preconditions.checkState(numHashPartitions_ != NO_HASH_PARTITIONS);
+      builder.append(" PARTITIONS ").append(numHashPartitions_);
     } else {
       builder.append(" (");
       if (rangePartitions_ != null) {
@@ -176,8 +175,8 @@ public class KuduPartitionParam implements ParseNode {
     // TODO: Add a validate() function to ensure the validity of distribute 
params.
     if (type_ == Type.HASH) {
       TKuduPartitionByHashParam hash = new TKuduPartitionByHashParam();
-      Preconditions.checkState(numBuckets_ != NO_BUCKETS);
-      hash.setNum_buckets(numBuckets_);
+      Preconditions.checkState(numHashPartitions_ != NO_HASH_PARTITIONS);
+      hash.setNum_partitions(numHashPartitions_);
       hash.setColumns(colNames_);
       result.setBy_hash_param(hash);
     } else {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/java/org/apache/impala/analysis/TableDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableDef.java 
b/fe/src/main/java/org/apache/impala/analysis/TableDef.java
index a40b4d2..ae314c3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableDef.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.permission.FsAction;
  * - EXTERNAL
  * - IF NOT EXISTS
  * - PARTITIONED BY
- * - DISTRIBUTE BY
+ * - PARTITION BY
  * - ROWFORMAT
  * - FILEFORMAT
  * - COMMENT

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java 
b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 9bbcbd5..ae2a640 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -333,7 +333,7 @@ public class KuduTable extends Table {
       if (param.isSetBy_hash_param()) {
         TKuduPartitionByHashParam hashParam = param.getBy_hash_param();
         partitionBy_.add(KuduPartitionParam.createHashParam(
-            hashParam.getColumns(), hashParam.getNum_buckets()));
+            hashParam.getColumns(), hashParam.getNum_partitions()));
       } else {
         Preconditions.checkState(param.isSetBy_range_param());
         TKuduPartitionByRangeParam rangeParam = param.getBy_range_param();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
----------------------------------------------------------------------
diff --git 
a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java 
b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
index b0616c4..6fc5674 100644
--- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
@@ -146,7 +146,7 @@ public class KuduCatalogOpExecutor {
         if (partParam.isSetBy_hash_param()) {
           Preconditions.checkState(!partParam.isSetBy_range_param());
           
tableOpts.addHashPartitions(partParam.getBy_hash_param().getColumns(),
-              partParam.getBy_hash_param().getNum_buckets());
+              partParam.getBy_hash_param().getNum_partitions());
         } else {
           Preconditions.checkState(partParam.isSetBy_range_param());
           hasRangePartitioning = true;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/main/jflex/sql-scanner.flex
----------------------------------------------------------------------
diff --git a/fe/src/main/jflex/sql-scanner.flex 
b/fe/src/main/jflex/sql-scanner.flex
index 3d3b24d..982e9a2 100644
--- a/fe/src/main/jflex/sql-scanner.flex
+++ b/fe/src/main/jflex/sql-scanner.flex
@@ -70,7 +70,6 @@ import org.apache.impala.analysis.SqlParserSymbols;
     keywordMap.put("binary", new Integer(SqlParserSymbols.KW_BINARY));
     keywordMap.put("block_size", new Integer(SqlParserSymbols.KW_BLOCKSIZE));
     keywordMap.put("boolean", new Integer(SqlParserSymbols.KW_BOOLEAN));
-    keywordMap.put("buckets", new Integer(SqlParserSymbols.KW_BUCKETS));
     keywordMap.put("by", new Integer(SqlParserSymbols.KW_BY));
     keywordMap.put("cached", new Integer(SqlParserSymbols.KW_CACHED));
     keywordMap.put("case", new Integer(SqlParserSymbols.KW_CASE));
@@ -100,7 +99,6 @@ import org.apache.impala.analysis.SqlParserSymbols;
     keywordMap.put("desc", new Integer(SqlParserSymbols.KW_DESC));
     keywordMap.put("describe", new Integer(SqlParserSymbols.KW_DESCRIBE));
     keywordMap.put("distinct", new Integer(SqlParserSymbols.KW_DISTINCT));
-    keywordMap.put("distribute", new Integer(SqlParserSymbols.KW_DISTRIBUTE));
     keywordMap.put("div", new Integer(SqlParserSymbols.KW_DIV));
     keywordMap.put("double", new Integer(SqlParserSymbols.KW_DOUBLE));
     keywordMap.put("drop", new Integer(SqlParserSymbols.KW_DROP));
@@ -209,7 +207,6 @@ import org.apache.impala.analysis.SqlParserSymbols;
     keywordMap.put("set", new Integer(SqlParserSymbols.KW_SET));
     keywordMap.put("show", new Integer(SqlParserSymbols.KW_SHOW));
     keywordMap.put("smallint", new Integer(SqlParserSymbols.KW_SMALLINT));
-    keywordMap.put("split", new Integer(SqlParserSymbols.KW_SPLIT));
     keywordMap.put("stats", new Integer(SqlParserSymbols.KW_STATS));
     keywordMap.put("stored", new Integer(SqlParserSymbols.KW_STORED));
     keywordMap.put("straight_join", new 
Integer(SqlParserSymbols.KW_STRAIGHT_JOIN));

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java 
b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index d806a2f..d1b6a8d 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -1440,7 +1440,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "Partition column name mismatch: tinyint_col != int_col");
 
     // CTAS into managed Kudu tables
-    AnalyzesOk("create table t primary key (id) partition by hash (id) into 3 
buckets" +
+    AnalyzesOk("create table t primary key (id) partition by hash (id) 
partitions 3" +
         " stored as kudu as select id, bool_col, tinyint_col, smallint_col, 
int_col, " +
         "bigint_col, float_col, double_col, date_string_col, string_col " +
         "from functional.alltypestiny");
@@ -1449,7 +1449,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "stored as kudu as select id, bool_col, tinyint_col, smallint_col, 
int_col, " +
         "bigint_col, float_col, double_col, date_string_col, string_col " +
         "from functional.alltypestiny");
-    AnalyzesOk("create table t primary key (id) partition by hash (id) into 3 
buckets, "+
+    AnalyzesOk("create table t primary key (id) partition by hash (id) 
partitions 3, "+
         "range (id) (partition values < 10, partition 10 <= values < 20, " +
         "partition value = 30) stored as kudu as select id, bool_col, 
tinyint_col, " +
         "smallint_col, int_col, bigint_col, float_col, double_col, 
date_string_col, " +
@@ -1461,27 +1461,27 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "external Kudu tables.");
 
     // CTAS into Kudu tables with unsupported types
-    AnalysisError("create table t primary key (id) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (id) partition by hash 
partitions 3" +
         " stored as kudu as select id, timestamp_col from 
functional.alltypestiny",
         "Cannot create table 't': Type TIMESTAMP is not supported in Kudu");
-    AnalysisError("create table t primary key (cs) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (cs) partition by hash 
partitions 3" +
         " stored as kudu as select cs from functional.chars_tiny",
         "Cannot create table 't': Type CHAR(5) is not supported in Kudu");
-    AnalysisError("create table t primary key (vc) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (vc) partition by hash 
partitions 3" +
         " stored as kudu as select vc from functional.chars_tiny",
         "Cannot create table 't': Type VARCHAR(32) is not supported in Kudu");
-    AnalysisError("create table t primary key (id) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (id) partition by hash 
partitions 3" +
         " stored as kudu as select c1 as id from functional.decimal_tiny",
         "Cannot create table 't': Type DECIMAL(10,4) is not supported in 
Kudu");
-    AnalysisError("create table t primary key (id) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (id) partition by hash 
partitions 3" +
         " stored as kudu as select id, s from 
functional.complextypes_fileformat",
         "Expr 's' in select list returns a complex type 
'STRUCT<f1:STRING,f2:INT>'.\n" +
         "Only scalar types are allowed in the select list.");
-    AnalysisError("create table t primary key (id) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (id) partition by hash 
partitions 3" +
         " stored as kudu as select id, m from 
functional.complextypes_fileformat",
         "Expr 'm' in select list returns a complex type 
'MAP<STRING,BIGINT>'.\n" +
         "Only scalar types are allowed in the select list.");
-    AnalysisError("create table t primary key (id) partition by hash into 3 
buckets" +
+    AnalysisError("create table t primary key (id) partition by hash 
partitions 3" +
         " stored as kudu as select id, a from 
functional.complextypes_fileformat",
         "Expr 'a' in select list returns a complex type 'ARRAY<INT>'.\n" +
         "Only scalar types are allowed in the select list.");
@@ -1914,17 +1914,17 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     TestUtils.assumeKuduIsSupported();
     // Test primary keys and partition by clauses
     AnalyzesOk("create table tab (x int primary key) partition by hash(x) " +
-        "into 8 buckets stored as kudu");
+        "partitions 8 stored as kudu");
     AnalyzesOk("create table tab (x int, primary key(x)) partition by hash(x) 
" +
-        "into 8 buckets stored as kudu");
+        "partitions 8 stored as kudu");
     AnalyzesOk("create table tab (x int, y int, primary key (x, y)) " +
-        "partition by hash(x, y) into 8 buckets stored as kudu");
+        "partition by hash(x, y) partitions 8 stored as kudu");
     AnalyzesOk("create table tab (x int, y int, primary key (x)) " +
-        "partition by hash(x) into 8 buckets stored as kudu");
+        "partition by hash(x) partitions 8 stored as kudu");
     AnalyzesOk("create table tab (x int, y int, primary key(x, y)) " +
-        "partition by hash(y) into 8 buckets stored as kudu");
+        "partition by hash(y) partitions 8 stored as kudu");
     AnalyzesOk("create table tab (x int, y string, primary key (x)) partition 
by " +
-        "hash (x) into 3 buckets, range (x) (partition values < 1, partition " 
+
+        "hash (x) partitions 3, range (x) (partition values < 1, partition " +
         "1 <= values < 10, partition 10 <= values < 20, partition value = 30) 
" +
         "stored as kudu");
     AnalyzesOk("create table tab (x int, y int, primary key (x, y)) partition 
by " +
@@ -1952,24 +1952,24 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Multilevel partitioning. Data is split into 3 buckets based on 'x' and 
each
     // bucket is partitioned into 4 tablets based on the range partitions of 
'y'.
     AnalyzesOk("create table tab (x int, y string, primary key(x, y)) " +
-        "partition by hash(x) into 3 buckets, range(y) " +
+        "partition by hash(x) partitions 3, range(y) " +
         "(partition values < 'aa', partition 'aa' <= values < 'bb', " +
         "partition 'bb' <= values < 'cc', partition 'cc' <= values) " +
         "stored as kudu");
     // Key column in upper case
     AnalyzesOk("create table tab (x int, y int, primary key (X)) " +
-        "partition by hash (x) into 8 buckets stored as kudu");
+        "partition by hash (x) partitions 8 stored as kudu");
     // Flexible Partitioning
     AnalyzesOk("create table tab (a int, b int, c int, d int, primary key (a, 
b, c))" +
-        "partition by hash (a, b) into 8 buckets, hash(c) into 2 buckets 
stored as " +
+        "partition by hash (a, b) partitions 8, hash(c) partitions 2 stored as 
" +
         "kudu");
     // No columns specified in the PARTITION BY HASH clause
     AnalyzesOk("create table tab (a int primary key, b int, c int, d int) " +
-        "partition by hash into 8 buckets stored as kudu");
+        "partition by hash partitions 8 stored as kudu");
     // Distribute range data types are picked up during analysis and forwarded 
to Kudu.
     // Column names in distribute params should also be case-insensitive.
     AnalyzesOk("create table tab (a int, b int, c int, d int, primary key(a, 
b, c, d))" +
-        "partition by hash (a, B, c) into 8 buckets, " +
+        "partition by hash (a, B, c) partitions 8, " +
         "range (A) (partition values < 1, partition 1 <= values < 2, " +
         "partition 2 <= values < 3, partition 3 <= values < 4, partition 4 <= 
values) " +
         "stored as kudu");
@@ -1979,18 +1979,18 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "(partition 'aa' < values <= 'bb') stored as kudu");
     // Null values in range partition values
     AnalysisError("create table tab (id int, name string, primary key(id, 
name)) " +
-        "partition by hash (id) into 3 buckets, range (name) " +
+        "partition by hash (id) partitions 3, range (name) " +
         "(partition value = null, partition value = 1) stored as kudu",
         "Range partition values cannot be NULL. Range partition: 'PARTITION " +
         "VALUE = NULL'");
     // Primary key specified in tblproperties
     AnalysisError(String.format("create table tab (x int) partition by hash 
(x) " +
-        "into 8 buckets stored as kudu tblproperties ('%s' = 'x')",
+        "partitions 8 stored as kudu tblproperties ('%s' = 'x')",
         KuduTable.KEY_KEY_COLUMNS), "PRIMARY KEY must be used instead of the 
table " +
         "property");
     // Primary key column that doesn't exist
     AnalysisError("create table tab (x int, y int, primary key (z)) " +
-        "partition by hash (x) into 8 buckets stored as kudu",
+        "partition by hash (x) partitions 8 stored as kudu",
         "PRIMARY KEY column 'z' does not exist in the table");
     // Invalid composite primary key
     AnalysisError("create table tab (x int primary key, primary key(x)) stored 
" +
@@ -2003,7 +2003,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "of the column definition.");
     // Specifying the same primary key column multiple times
     AnalysisError("create table tab (x int, primary key (x, x)) partition by 
hash (x) " +
-        "into 8 buckets stored as kudu",
+        "partitions 8 stored as kudu",
         "Column 'x' is listed multiple times as a PRIMARY KEY.");
     // Number of range partition boundary values should be equal to the number 
of range
     // columns.
@@ -2014,7 +2014,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "partitioning columns: (2 vs 1). Range partition: 'PARTITION VALUE = 
(1,2)'");
     // Key ranges must match the column types.
     AnalysisError("create table tab (a int, b int, c int, d int, primary 
key(a, b, c)) " +
-        "partition by hash (a, b, c) into 8 buckets, range (a) " +
+        "partition by hash (a, b, c) partitions 8, range (a) " +
         "(partition value = 1, partition value = 'abc', partition 3 <= values) 
" +
         "stored as kudu", "Range partition value 'abc' (type: STRING) is not 
type " +
         "compatible with partitioning column 'a' (type: INT).");
@@ -2044,7 +2044,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "Only key columns can be used in PARTITION BY.");
     // No float range partition values
     AnalysisError("create table tab (a int, b int, c int, d int, primary key 
(a, b, c))" +
-        "partition by hash (a, b, c) into 8 buckets, " +
+        "partition by hash (a, b, c) partitions 8, " +
         "range (a) (partition value = 1.2, partition value = 2) stored as 
kudu",
         "Range partition value 1.2 (type: DECIMAL(2,1)) is not type compatible 
with " +
         "partitioning column 'a' (type: INT).");
@@ -2055,7 +2055,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "is not a key column. Only key columns can be used in PARTITION BY");
     // Kudu table name is specified in tblproperties
     AnalyzesOk("create table tab (x int primary key) partition by hash (x) " +
-        "into 8 buckets stored as kudu tblproperties 
('kudu.table_name'='tab_1'," +
+        "partitions 8 stored as kudu tblproperties 
('kudu.table_name'='tab_1'," +
         "'kudu.num_tablet_replicas'='1'," +
         "'kudu.master_addresses' = '127.0.0.1:8080, 127.0.0.1:8081')");
     // No port is specified in kudu master address
@@ -2079,7 +2079,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "'testPool'", "A Kudu table cannot be cached in HDFS.");
     // LOCATION cannot be used with Kudu tables
     AnalysisError("create table tab (a int primary key) partition by hash (a) 
" +
-        "into 3 buckets stored as kudu location '/test-warehouse/'",
+        "partitions 3 stored as kudu location '/test-warehouse/'",
         "LOCATION cannot be specified for a Kudu table.");
     // PARTITION BY is required for managed tables.
     AnalysisError("create table tab (a int, primary key (a)) stored as kudu",
@@ -2105,12 +2105,12 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
       // Unsupported type is PK and partition col
       String stmt = String.format("create table tab (x %s primary key) " +
-          "partition by hash(x) into 3 buckets stored as kudu", t);
+          "partition by hash(x) partitions 3 stored as kudu", t);
       AnalysisError(stmt, expectedError);
 
       // Unsupported type is not PK/partition col
       stmt = String.format("create table tab (x int primary key, y %s) " +
-          "partition by hash(x) into 3 buckets stored as kudu", t);
+          "partition by hash(x) partitions 3 stored as kudu", t);
       AnalysisError(stmt, expectedError);
     }
 
@@ -2126,7 +2126,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
               AnalyzesOk(String.format("create table tab (x int primary key " +
                   "not null encoding %s compression %s %s %s, y int encoding 
%s " +
                   "compression %s %s %s %s) partition by hash (x) " +
-                  "into 3 buckets stored as kudu", enc, comp, def, block, enc,
+                  "partitions 3 stored as kudu", enc, comp, def, block, enc,
                   comp, def, nul, block));
             }
           }
@@ -2136,23 +2136,23 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Primary key specified using the PRIMARY KEY clause
     AnalyzesOk("create table tab (x int not null encoding plain_encoding " +
         "compression snappy block_size 1, y int null encoding rle compression 
lz4 " +
-        "default 1, primary key(x)) partition by hash (x) into 3 buckets " +
+        "default 1, primary key(x)) partition by hash (x) partitions 3 " +
         "stored as kudu");
     // Primary keys can't be null
     AnalysisError("create table tab (x int primary key null, y int not null) " 
+
-        "partition by hash (x) into 3 buckets stored as kudu", "Primary key 
columns " +
+        "partition by hash (x) partitions 3 stored as kudu", "Primary key 
columns " +
         "cannot be nullable: x INT PRIMARY KEY NULL");
     AnalysisError("create table tab (x int not null, y int null, primary key 
(x, y)) " +
-        "partition by hash (x) into 3 buckets stored as kudu", "Primary key 
columns " +
+        "partition by hash (x) partitions 3 stored as kudu", "Primary key 
columns " +
         "cannot be nullable: y INT NULL");
     // Unsupported encoding value
     AnalysisError("create table tab (x int primary key, y int encoding 
invalid_enc) " +
-        "partition by hash (x) into 3 buckets stored as kudu", "Unsupported 
encoding " +
+        "partition by hash (x) partitions 3 stored as kudu", "Unsupported 
encoding " +
         "value 'INVALID_ENC'. Supported encoding values are: " +
         Joiner.on(", ").join(Encoding.values()));
     // Unsupported compression algorithm
     AnalysisError("create table tab (x int primary key, y int compression " +
-        "invalid_comp) partition by hash (x) into 3 buckets stored as kudu",
+        "invalid_comp) partition by hash (x) partitions 3 stored as kudu",
         "Unsupported compression algorithm 'INVALID_COMP'. Supported 
compression " +
         "algorithms are: " + Joiner.on(", 
").join(CompressionAlgorithm.values()));
     // Default values
@@ -2160,38 +2160,38 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "i3 int default 100, i4 bigint default 1000, vals string default 
'test', " +
         "valf float default cast(1.2 as float), vald double default " +
         "cast(3.1452 as double), valb boolean default true, " +
-        "primary key (i1, i2, i3, i4, vals)) partition by hash (i1) into 3 " +
-        "buckets stored as kudu");
+        "primary key (i1, i2, i3, i4, vals)) partition by hash (i1) partitions 
3 " +
+        "stored as kudu");
     AnalyzesOk("create table tab (i int primary key default 1+1+1) " +
-        "partition by hash (i) into 3 buckets stored as kudu");
+        "partition by hash (i) partitions 3 stored as kudu");
     AnalyzesOk("create table tab (i int primary key default factorial(5)) " +
-        "partition by hash (i) into 3 buckets stored as kudu");
+        "partition by hash (i) partitions 3 stored as kudu");
     AnalyzesOk("create table tab (i int primary key, x int null default " +
-        "isnull(null, null)) partition by hash (i) into 3 buckets stored as 
kudu");
+        "isnull(null, null)) partition by hash (i) partitions 3 stored as 
kudu");
     // Invalid default values
     AnalysisError("create table tab (i int primary key default 'string_val') " 
+
-        "partition by hash (i) into 3 buckets stored as kudu", "Default value 
" +
+        "partition by hash (i) partitions 3 stored as kudu", "Default value " +
         "'string_val' (type: STRING) is not compatible with column 'i' (type: 
INT).");
     AnalysisError("create table tab (i int primary key, x int default 1.1) " +
-        "partition by hash (i) into 3 buckets stored as kudu",
+        "partition by hash (i) partitions 3 stored as kudu",
         "Default value 1.1 (type: DECIMAL(2,1)) is not compatible with column 
" +
         "'x' (type: INT).");
     AnalysisError("create table tab (i tinyint primary key default 128) " +
-        "partition by hash (i) into 3 buckets stored as kudu", "Default value 
" +
+        "partition by hash (i) partitions 3 stored as kudu", "Default value " +
         "128 (type: SMALLINT) is not compatible with column 'i' (type: 
TINYINT).");
     AnalysisError("create table tab (i int primary key default isnull(null, 
null)) " +
-        "partition by hash (i) into 3 buckets stored as kudu", "Default value 
of " +
+        "partition by hash (i) partitions 3 stored as kudu", "Default value of 
" +
         "NULL not allowed on non-nullable column: 'i'");
     AnalysisError("create table tab (i int primary key, x int not null " +
-        "default isnull(null, null)) partition by hash (i) into 3 buckets " +
+        "default isnull(null, null)) partition by hash (i) partitions 3 " +
         "stored as kudu", "Default value of NULL not allowed on non-nullable 
column: " +
         "'x'");
     // Invalid block_size values
     AnalysisError("create table tab (i int primary key block_size 1.1) " +
-        "partition by hash (i) into 3 buckets stored as kudu", "Invalid value 
" +
+        "partition by hash (i) partitions 3 stored as kudu", "Invalid value " +
         "for BLOCK_SIZE: 1.1. A positive INTEGER value is expected.");
     AnalysisError("create table tab (i int primary key block_size 'val') " +
-        "partition by hash (i) into 3 buckets stored as kudu", "Invalid value 
" +
+        "partition by hash (i) partitions 3 stored as kudu", "Invalid value " +
         "for BLOCK_SIZE: 'val'. A positive INTEGER value is expected.");
   }
 
@@ -2438,7 +2438,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Kudu specific clauses used in an Avro table.
     AnalysisError("create table functional.new_table (i int) " +
-        "partition by hash(i) into 3 buckets stored as avro",
+        "partition by hash(i) partitions 3 stored as avro",
         "Only Kudu tables can use the PARTITION BY clause.");
     AnalysisError("create table functional.new_table (i int primary key) " +
         "stored as avro", "Unsupported column options for file format 'AVRO': 
" +

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java 
b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 3d6bbec..9821694 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -911,7 +911,7 @@ public class AuthorizationTest {
 
     // IMPALA-4000: ALL privileges on SERVER are not required to create 
managed tables.
     AuthzOk("create table tpch.kudu_tbl (i int, j int, primary key (i))" +
-        " PARTITION BY HASH (i) INTO 9 BUCKETS stored as kudu TBLPROPERTIES " +
+        " PARTITION BY HASH (i) PARTITIONS 9 stored as kudu TBLPROPERTIES " +
         "('kudu.master_addresses'='127.0.0.1')");
 
     // User does not have permission to create table at the specified 
location..

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java 
b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index a73adf3..565b207 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -2445,14 +2445,14 @@ public class ParserTest extends FrontendTestBase {
 
 
     // Flexible partitioning
-    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH(i) INTO 4 BUCKETS");
-    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH(i) INTO 4 BUCKETS, " +
-        "HASH(a) INTO 2 BUCKETS");
-    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH INTO 4 BUCKETS");
-    ParsesOk("CREATE TABLE Foo (i int, k int) PARTITION BY HASH INTO 4 
BUCKETS," +
-        " HASH(k) INTO 4 BUCKETS");
+    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH(i) PARTITIONS 4");
+    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH(i) PARTITIONS 4, " +
+        "HASH(a) PARTITIONS 2");
+    ParsesOk("CREATE TABLE Foo (i int) PARTITION BY HASH PARTITIONS 4");
+    ParsesOk("CREATE TABLE Foo (i int, k int) PARTITION BY HASH PARTITIONS 4," 
+
+        " HASH(k) PARTITIONS 4");
     ParserError("CREATE TABLE Foo (i int) PARTITION BY HASH(i)");
-    ParserError("CREATE EXTERNAL TABLE Foo PARTITION BY HASH INTO 4 BUCKETS");
+    ParserError("CREATE EXTERNAL TABLE Foo PARTITION BY HASH PARTITIONS 4");
 
     // Range partitioning
     ParsesOk("CREATE TABLE Foo (i int) PARTITION BY RANGE (PARTITION VALUE = 
10)");
@@ -2471,7 +2471,7 @@ public class ParserTest extends FrontendTestBase {
         "(PARTITION VALUE = (2001, 1), PARTITION VALUE = (2001, 2), " +
         "PARTITION VALUE = (2002, 1))");
     ParsesOk("CREATE TABLE Foo (a int, b string) PARTITION BY " +
-        "HASH (a) INTO 3 BUCKETS, RANGE (a, b) (PARTITION VALUE = (1, 'abc'), 
" +
+        "HASH (a) PARTITIONS 3, RANGE (a, b) (PARTITION VALUE = (1, 'abc'), " +
         "PARTITION VALUE = (2, 'def'))");
     ParsesOk("CREATE TABLE Foo (a int) PARTITION BY RANGE (a) " +
         "(PARTITION VALUE = 1 + 1) STORED AS KUDU");
@@ -2484,10 +2484,10 @@ public class ParserTest extends FrontendTestBase {
         "STORED AS KUDU");
 
     ParserError("CREATE TABLE Foo (a int) PARTITION BY RANGE (a) ()");
-    ParserError("CREATE TABLE Foo (a int) PARTITION BY HASH (a) INTO 4 
BUCKETS, " +
+    ParserError("CREATE TABLE Foo (a int) PARTITION BY HASH (a) PARTITIONS 4, 
" +
         "RANGE (a) (PARTITION VALUE = 10), RANGE (a) (PARTITION VALUES < 10)");
     ParserError("CREATE TABLE Foo (a int) PARTITION BY RANGE (a) " +
-        "(PARTITION VALUE = 10), HASH (a) INTO 3 BUCKETS");
+        "(PARTITION VALUE = 10), HASH (a) PARTITIONS 3");
     ParserError("CREATE TABLE Foo (a int) PARTITION BY RANGE (a) " +
         "(PARTITION VALUES = 10) STORED AS KUDU");
     ParserError("CREATE TABLE Foo (a int) PARTITION BY RANGE (a) " +
@@ -2676,10 +2676,10 @@ public class ParserTest extends FrontendTestBase {
     ParsesOk("CREATE TABLE Foo ROW FORMAT DELIMITED STORED AS PARQUETFILE AS 
SELECT 1");
     ParsesOk("CREATE TABLE Foo TBLPROPERTIES ('a'='b', 'c'='d') AS SELECT * 
from bar");
     ParsesOk("CREATE TABLE Foo PRIMARY KEY (a, b) AS SELECT * from bar");
-    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a, b) PARTITION BY HASH INTO 2 
BUCKETS " +
+    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a, b) PARTITION BY HASH PARTITIONS 
2 " +
+        "AS SELECT * from bar");
+    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a, b) PARTITION BY HASH (b) 
PARTITIONS 2 " +
         "AS SELECT * from bar");
-    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a, b) PARTITION BY HASH (b) INTO 2 
" +
-        "BUCKETS AS SELECT * from bar");
 
     // With clause works
     ParsesOk("CREATE TABLE Foo AS with t1 as (select 1) select * from t1");
@@ -2708,10 +2708,10 @@ public class ParserTest extends FrontendTestBase {
     ParserError("CREATE TABLE Foo PARTITIONED BY (a, b=2) AS SELECT * from 
Bar");
 
     // Flexible partitioning
-    ParsesOk("CREATE TABLE Foo PRIMARY KEY (i) PARTITION BY HASH(i) INTO 4 
BUCKETS AS " +
+    ParsesOk("CREATE TABLE Foo PRIMARY KEY (i) PARTITION BY HASH(i) PARTITIONS 
4 AS " +
         "SELECT 1");
-    ParserError("CREATE TABLE Foo PARTITION BY HASH(i) INTO 4 BUCKETS AS 
SELECT 1");
-    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a) PARTITION BY HASH(a) INTO 4 
BUCKETS " +
+    ParserError("CREATE TABLE Foo PARTITION BY HASH(i) PARTITIONS 4 AS SELECT 
1");
+    ParsesOk("CREATE TABLE Foo PRIMARY KEY (a) PARTITION BY HASH(a) PARTITIONS 
4 " +
         "TBLPROPERTIES ('a'='b', 'c'='d') AS SELECT * from bar");
     ParsesOk("CREATE TABLE Foo PRIMARY KEY (a) PARTITION BY RANGE(a) " +
         "(PARTITION 1 < VALUES < 10, PARTITION 10 <= VALUES < 20, PARTITION 
VALUE = 30) " +

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/bin/generate-schema-statements.py
----------------------------------------------------------------------
diff --git a/testdata/bin/generate-schema-statements.py 
b/testdata/bin/generate-schema-statements.py
index 5d5da1b..07e722a 100755
--- a/testdata/bin/generate-schema-statements.py
+++ b/testdata/bin/generate-schema-statements.py
@@ -223,7 +223,7 @@ def build_table_template(file_format, columns, 
partition_columns, row_format,
   elif file_format == 'kudu':
     # Use partitioned_by to set a trivial hash distribution
     assert not partitioned_by, "Kudu table shouldn't have partition cols 
defined"
-    partitioned_by = "partition by hash into 3 buckets"
+    partitioned_by = "partition by hash partitions 3"
 
     # Fetch KUDU host and port from environment
     kudu_master = os.getenv("KUDU_MASTER_ADDRESS", "127.0.0.1")

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/datasets/functional/functional_schema_template.sql
----------------------------------------------------------------------
diff --git a/testdata/datasets/functional/functional_schema_template.sql 
b/testdata/datasets/functional/functional_schema_template.sql
index d6859eb..97b40f4 100644
--- a/testdata/datasets/functional/functional_schema_template.sql
+++ b/testdata/datasets/functional/functional_schema_template.sql
@@ -93,7 +93,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   year INT,
   month INT
 )
-PARTITION BY HASH (id) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
 ---- DEPENDENT_LOAD_KUDU
 INSERT into TABLE {db_name}{db_suffix}.{table_name}
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, 
float_col, double_col, date_string_col, string_col,
@@ -171,7 +171,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   year INT,
   month INT
 )
-PARTITION BY HASH (id) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
 ---- DEPENDENT_LOAD_KUDU
 INSERT into TABLE {db_name}{db_suffix}.{table_name}
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, 
float_col, double_col, date_string_col, string_col,
@@ -230,7 +230,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   year INT,
   month INT
 )
-PARTITION BY HASH (id) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
 ---- DEPENDENT_LOAD_KUDU
 INSERT INTO TABLE {db_name}{db_suffix}.{table_name}
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, 
float_col, double_col, date_string_col, string_col,
@@ -566,7 +566,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name}_idx (
   month INT NULL,
   day INT NULL
 )
-PARTITION BY HASH (kudu_idx) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (kudu_idx) PARTITIONS 3 STORED AS KUDU;
 CREATE VIEW {db_name}{db_suffix}.{table_name} AS
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col,
        double_col, date_string_col, string_col, timestamp_col, year, month, day
@@ -645,7 +645,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   month INT,
   day INT
 )
-PARTITION BY HASH (id) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
 ---- DEPENDENT_LOAD_KUDU
 INSERT into TABLE {db_name}{db_suffix}.{table_name}
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col,
@@ -1157,7 +1157,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   field STRING PRIMARY KEY,
   f2 INT
 )
-PARTITION BY HASH (field) INTO 3 BUCKETS STORED AS KUDU;
+PARTITION BY HASH (field) PARTITIONS 3 STORED AS KUDU;
 ====
 ---- DATASET
 functional
@@ -1313,7 +1313,7 @@ create table {db_name}{db_suffix}.{table_name} (
   a string primary key, b string null, c string null, d int null, e double 
null,
   f string null, g string null
 )
-partition by hash(a) into 3 buckets stored as kudu;
+partition by hash(a) partitions 3 stored as kudu;
 ====
 ---- DATASET
 functional
@@ -1340,7 +1340,7 @@ create table {db_name}{db_suffix}.{table_name} (
   a string primary key, b string null, c string null, d int null, e double 
null,
   f string null, g string null
 )
-partition by hash(a) into 3 buckets stored as kudu;
+partition by hash(a) partitions 3 stored as kudu;
 ====
 ---- DATASET
 functional

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/datasets/tpcds/tpcds_kudu_template.sql
----------------------------------------------------------------------
diff --git a/testdata/datasets/tpcds/tpcds_kudu_template.sql 
b/testdata/datasets/tpcds/tpcds_kudu_template.sql
index 021fe87..5c4932f 100644
--- a/testdata/datasets/tpcds/tpcds_kudu_template.sql
+++ b/testdata/datasets/tpcds/tpcds_kudu_template.sql
@@ -49,7 +49,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.store_sales (
   ss_net_profit DOUBLE,
   PRIMARY KEY (ss_ticket_number, ss_item_sk)
 )
-PARTITION BY HASH (ss_ticket_number,ss_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (ss_ticket_number,ss_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -117,7 +117,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.web_sales (
   ws_net_profit DOUBLE,
   PRIMARY KEY (ws_order_number, ws_item_sk)
 )
-PARTITION BY HASH (ws_order_number,ws_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (ws_order_number,ws_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -197,7 +197,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.catalog_sales (
   cs_net_profit DOUBLE,
   PRIMARY KEY (cs_order_number, cs_item_sk)
 )
-PARTITION BY HASH (cs_order_number,cs_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (cs_order_number,cs_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -263,7 +263,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.store_returns (
   sr_net_loss DOUBLE,
   PRIMARY KEY (sr_ticket_number, sr_item_sk)
 )
-PARTITION BY HASH (sr_ticket_number,sr_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (sr_ticket_number,sr_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -319,7 +319,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.web_returns (
   wr_net_loss DOUBLE,
   PRIMARY KEY (wr_order_number, wr_item_sk)
 )
-PARTITION BY HASH (wr_order_number,wr_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (wr_order_number,wr_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -382,7 +382,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.catalog_returns 
(
   cr_net_loss DOUBLE,
   PRIMARY KEY (cr_order_number, cr_item_sk)
 )
-PARTITION BY HASH (cr_order_number,cr_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (cr_order_number,cr_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -425,7 +425,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.inventory (
   inv_quantity_on_hand BIGINT,
   PRIMARY KEY (inv_date_sk, inv_item_sk, inv_warehouse_sk)
 )
-PARTITION BY HASH (inv_item_sk,inv_date_sk,inv_warehouse_sk) INTO {buckets} 
BUCKETS
+PARTITION BY HASH (inv_item_sk,inv_date_sk,inv_warehouse_sk) PARTITIONS 
{buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -453,7 +453,7 @@ CREATE TABLE {target_db_name}.customer (
   c_email_address STRING,
   c_last_review_date BIGINT
 )
-PARTITION BY HASH (c_customer_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (c_customer_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -475,7 +475,7 @@ CREATE TABLE IF NOT EXISTS 
{target_db_name}.customer_address (
   ca_gmt_offset DOUBLE,
   ca_location_type STRING
 )
-PARTITION BY HASH (ca_address_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (ca_address_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -494,7 +494,7 @@ CREATE TABLE IF NOT EXISTS 
{target_db_name}.customer_demographics (
   cd_dep_employed_count BIGINT,
   cd_dep_college_count BIGINT
 )
-PARTITION BY HASH (cd_demo_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (cd_demo_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -532,7 +532,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.date_dim (
   d_current_quarter STRING,
   d_current_year STRING
 )
-PARTITION BY HASH (d_date_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (d_date_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -546,7 +546,7 @@ CREATE TABLE IF NOT EXISTS 
{target_db_name}.household_demographics (
   hd_dep_count BIGINT,
   hd_vehicle_count BIGINT
 )
-PARTITION BY HASH (hd_demo_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (hd_demo_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -578,7 +578,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.item (
   i_manager_id BIGINT,
   i_product_name STRING
 )
-PARTITION BY HASH (i_item_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (i_item_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -606,7 +606,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.promotion (
   p_purpose STRING,
   p_discount_active STRING
 )
-PARTITION BY HASH (p_promo_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (p_promo_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -665,7 +665,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.store (
   s_gmt_offset DOUBLE,
   s_tax_precentage DOUBLE
 )
-PARTITION BY HASH (s_store_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (s_store_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -684,7 +684,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.time_dim (
   t_sub_shift STRING,
   t_meal_time STRING
 )
-PARTITION BY HASH (t_time_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (t_time_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -724,7 +724,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.call_center (
   cc_gmt_offset DOUBLE,
   cc_tax_percentage DOUBLE
 )
-PARTITION BY HASH (cc_call_center_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (cc_call_center_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -742,7 +742,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.catalog_page (
   cp_description STRING,
   cp_type STRING
 )
-PARTITION BY HASH (cp_catalog_page_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (cp_catalog_page_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -754,7 +754,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.income_band (
   ib_lower_bound BIGINT,
   ib_upper_bound BIGINT
 )
-PARTITION BY HASH (ib_income_band_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (ib_income_band_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -766,7 +766,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.reason (
   r_reason_id STRING,
   r_reason_desc STRING
 )
-PARTITION BY HASH (r_reason_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (r_reason_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -781,7 +781,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.ship_mode (
   sm_carrier STRING,
   sm_contract STRING
 )
-PARTITION BY HASH (sm_ship_mode_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (sm_ship_mode_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -804,7 +804,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.warehouse (
   w_country STRING,
   w_gmt_offset DOUBLE
 )
-PARTITION BY HASH (w_warehouse_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (w_warehouse_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -827,7 +827,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.web_page (
   wp_image_count BIGINT,
   wp_max_ad_count BIGINT
 )
-PARTITION BY HASH (wp_web_page_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (wp_web_page_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 
@@ -862,7 +862,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.web_site (
   web_gmt_offset DOUBLE,
   web_tax_percentage DOUBLE
 )
-PARTITION BY HASH (web_site_sk) INTO {buckets} BUCKETS
+PARTITION BY HASH (web_site_sk) PARTITIONS {buckets}
 STORED AS KUDU
 TBLPROPERTIES ('kudu.master_addresses'='{kudu_master}:7051');
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/datasets/tpch/tpch_kudu_template.sql
----------------------------------------------------------------------
diff --git a/testdata/datasets/tpch/tpch_kudu_template.sql 
b/testdata/datasets/tpch/tpch_kudu_template.sql
index ed9765d..62fa072 100644
--- a/testdata/datasets/tpch/tpch_kudu_template.sql
+++ b/testdata/datasets/tpch/tpch_kudu_template.sql
@@ -42,7 +42,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.lineitem (
   L_COMMENT STRING,
   PRIMARY KEY (L_ORDERKEY, L_LINENUMBER)
 )
-partition by hash (l_orderkey) into {buckets} buckets
+partition by hash (l_orderkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -78,7 +78,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.part (
   P_RETAILPRICE DOUBLE,
   P_COMMENT STRING
 )
-partition by hash (p_partkey) into {buckets} buckets
+partition by hash (p_partkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -93,7 +93,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.partsupp (
   PS_COMMENT STRING,
   PRIMARY KEY (PS_PARTKEY, PS_SUPPKEY)
 )
-partition by hash (ps_partkey, ps_suppkey) into {buckets} buckets
+partition by hash (ps_partkey, ps_suppkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -109,7 +109,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.supplier (
   S_ACCTBAL DOUBLE,
   S_COMMENT STRING
 )
-partition by hash (s_suppkey) into {buckets} buckets
+partition by hash (s_suppkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -122,7 +122,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.nation (
   N_REGIONKEY BIGINT,
   N_COMMENT STRING
 )
-partition by hash (n_nationkey) into {buckets} buckets
+partition by hash (n_nationkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -134,7 +134,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.region (
   R_NAME STRING,
   R_COMMENT STRING
 )
-partition by hash (r_regionkey) into {buckets} buckets
+partition by hash (r_regionkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -152,7 +152,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.orders (
   O_SHIPPRIORITY BIGINT,
   O_COMMENT STRING
 )
-partition by hash (o_orderkey) into {buckets} buckets
+partition by hash (o_orderkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 
@@ -169,7 +169,7 @@ CREATE TABLE IF NOT EXISTS {target_db_name}.customer (
   C_MKTSEGMENT STRING,
   C_COMMENT STRING
 )
-partition by hash (c_custkey) into {buckets} buckets
+partition by hash (c_custkey) partitions {buckets}
 STORED AS KUDU
 tblproperties ('kudu.master_addresses' = '{kudu_master}:7051');
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/datasets/tpch/tpch_schema_template.sql
----------------------------------------------------------------------
diff --git a/testdata/datasets/tpch/tpch_schema_template.sql 
b/testdata/datasets/tpch/tpch_schema_template.sql
index dfa2305..2f99fd0 100644
--- a/testdata/datasets/tpch/tpch_schema_template.sql
+++ b/testdata/datasets/tpch/tpch_schema_template.sql
@@ -60,7 +60,7 @@ create table if not exists {db_name}{db_suffix}.{table_name} (
   L_COMMENT STRING,
   PRIMARY KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
 )
-partition by hash (l_orderkey) into 9 buckets stored as kudu;
+partition by hash (l_orderkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -95,7 +95,7 @@ create table if not exists {db_name}{db_suffix}.{table_name} (
   P_RETAILPRICE DOUBLE,
   P_COMMENT STRING
 )
-partition by hash (p_partkey) into 9 buckets stored as kudu;
+partition by hash (p_partkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -123,7 +123,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   PS_COMMENT STRING,
   PRIMARY KEY(PS_PARTKEY, PS_SUPPKEY)
 )
-partition by hash (ps_partkey, ps_suppkey) into 9 buckets stored as kudu;
+partition by hash (ps_partkey, ps_suppkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -154,7 +154,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   S_ACCTBAL DOUBLE,
   S_COMMENT STRING
 )
-partition by hash (s_suppkey) into 9 buckets stored as kudu;
+partition by hash (s_suppkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -179,7 +179,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   N_REGIONKEY SMALLINT,
   N_COMMENT STRING
 )
-partition by hash (n_nationkey) into 9 buckets stored as kudu;
+partition by hash (n_nationkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -202,7 +202,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   R_NAME STRING,
   R_COMMENT STRING
 )
-partition by hash (r_regionkey) into 9 buckets stored as kudu;
+partition by hash (r_regionkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -237,7 +237,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   O_SHIPPRIORITY INT,
   O_COMMENT STRING
 )
-partition by hash (o_orderkey) into 9 buckets stored as kudu;
+partition by hash (o_orderkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD
@@ -270,7 +270,7 @@ create table if not exists 
{db_name}{db_suffix}.{table_name} (
   C_MKTSEGMENT STRING,
   C_COMMENT STRING
 )
-partition by hash (c_custkey) into 9 buckets stored as kudu;
+partition by hash (c_custkey) partitions 9 stored as kudu;
 ---- DEPENDENT_LOAD
 INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
 ---- LOAD

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test 
b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
index 5dfa96a..3524287 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
@@ -4717,13 +4717,13 @@ functional.alltypes where id < 10
 }
 ====
 # CTAS a Kudu table
-create table kudu_ctas primary key (id) partition by hash (id) into 3 buckets
+create table kudu_ctas primary key (id) partition by hash (id) partitions 3
 stored as kudu as select id, bool_col, tinyint_col, smallint_col, int_col,
 bigint_col, float_col, double_col, date_string_col, string_col
 from functional.alltypestiny
 ---- LINEAGE
 {
-    "queryText":"create table kudu_ctas primary key (id) partition by hash 
(id) into 3 buckets\nstored as kudu as select id, bool_col, tinyint_col, 
smallint_col, int_col,\nbigint_col, float_col, double_col, date_string_col, 
string_col\nfrom functional.alltypestiny",
+    "queryText":"create table kudu_ctas primary key (id) partition by hash 
(id) partitions 3\nstored as kudu as select id, bool_col, tinyint_col, 
smallint_col, int_col,\nbigint_col, float_col, double_col, date_string_col, 
string_col\nfrom functional.alltypestiny",
     "hash":"6e3e192c7fb8bb6b22674a9b7b488b55",
     "user":"dev",
     "timestamp":1479933751,

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu-scan-node.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu-scan-node.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu-scan-node.test
index 21b1b95..0e7bf03 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu-scan-node.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu-scan-node.test
@@ -21,7 +21,7 @@ INT, BIGINT
 # Regression test for IMPALA-2740, a NULL value from a previously filtered row 
would
 # carry over into the next unfiltered row (the result below would incorrectly 
be 2,NULL).
 CREATE TABLE impala_2740 (key INT PRIMARY KEY, value INT)
-  PARTITION BY HASH (key) INTO 3 BUCKETS STORED AS KUDU;
+  PARTITION BY HASH (key) PARTITIONS 3 STORED AS KUDU;
 INSERT INTO impala_2740 VALUES (1, NULL), (2, -2);
 SELECT * FROM impala_2740 WHERE key != 1;
 ---- RESULTS
@@ -35,9 +35,9 @@ INT, INT
 # is run on all impalads. However, for the t1 table there is only as single 
scan range,
 # so two of the scan instances get empty scan ranges.
 CREATE TABLE impala_2635_t1 (id BIGINT PRIMARY KEY, name STRING)
-  PARTITION BY HASH (id) INTO 3 BUCKETS STORED AS KUDU;
+  PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
 CREATE TABLE impala_2635_t2 (id BIGINT PRIMARY KEY, name STRING)
-  PARTITION BY HASH(id) INTO 16 BUCKETS STORED AS KUDU;
+  PARTITION BY HASH(id) PARTITIONS 16 STORED AS KUDU;
 INSERT INTO impala_2635_t1 VALUES (0, 'Foo');
 INSERT INTO impala_2635_t2 VALUES (1, 'Blah');
 SELECT * FROM impala_2635_t1 UNION ALL SELECT * FROM impala_2635_t2;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu-timeouts-catalogd.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu-timeouts-catalogd.test
 
b/testdata/workloads/functional-query/queries/QueryTest/kudu-timeouts-catalogd.test
index ac57812..d811cfd 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/kudu-timeouts-catalogd.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/kudu-timeouts-catalogd.test
@@ -12,7 +12,7 @@ Error opening Kudu table 'impala::functional_kudu.alltypes'
 ====
 ---- QUERY
 create table test_kudu (x int primary key)
-partition by hash(x) into 3 buckets stored as kudu
+partition by hash(x) partitions 3 stored as kudu
 ---- CATCH
 Error creating Kudu table
 ====

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
index ac90e07..8c52318 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
@@ -1,7 +1,7 @@
 ====
 ---- QUERY
 create table simple (id int primary key, name string, valf float, vali bigint)
-  partition by hash (id) into 3 buckets stored as kudu
+  partition by hash (id) partitions 3 stored as kudu
 ---- RESULTS
 ====
 ---- QUERY
@@ -294,7 +294,7 @@ alter table tbl_to_alter set 
tblproperties('kudu.table_name'='kudu_tbl_to_alter'
 ====
 ---- QUERY
 # Create a new table and try to rename to an existing kudu table
-create table copy_of_tbl (a int primary key) partition by hash (a) into 3 
buckets
+create table copy_of_tbl (a int primary key) partition by hash (a) partitions 3
   stored as kudu tblproperties('kudu.table_name'='copy_of_tbl');
 alter table copy_of_tbl set 
tblproperties('kudu.table_name'='kudu_tbl_to_alter')
 ---- CATCH

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_create.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_create.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_create.test
index d67103b..938a6a4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_create.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_create.test
@@ -1,6 +1,6 @@
 ====
 ---- QUERY
-create table t primary key (id) partition by hash (id) into 3 buckets
+create table t primary key (id) partition by hash (id) partitions 3
 stored as kudu
 as select id, int_col from functional.alltypestiny;
 select * from t;
@@ -19,14 +19,14 @@ INT,INT
 ---- QUERY
 # Boolean primary key column
 create table tab (x int, y boolean, primary key(x, y))
-  partition by hash (x) into 3 buckets stored as kudu
+  partition by hash (x) partitions 3 stored as kudu
 ---- CATCH
 NonRecoverableException: key column may not have type of BOOL, FLOAT, or DOUBLE
 ====
 ---- QUERY
 # Float primary key column
 create table tab (x int, y float, primary key(x, y))
-  partition by hash (x) into 3 buckets stored as kudu
+  partition by hash (x) partitions 3 stored as kudu
 ---- CATCH
 NonRecoverableException: key column may not have type of BOOL, FLOAT, or DOUBLE
 ====
@@ -34,27 +34,27 @@ NonRecoverableException: key column may not have type of 
BOOL, FLOAT, or DOUBLE
 # Primary keys should be declared first
 # TODO: See KUDU-1709 for improving Kudu error messages.
 create table tab (x int, y int, primary key(y))
-  partition by hash (y) into 3 buckets stored as kudu
+  partition by hash (y) partitions 3 stored as kudu
 ---- CATCH
 NonRecoverableException: Got out-of-order key column: name: "y" type: INT32 
is_key: true is_nullable: false cfile_block_size: 0
 ====
 ---- QUERY
-# Small number of hash buckets
+# Small number of hash partitions
 create table tab (a int, b int, c int, d int, primary key(a, b, c))
-  partition by hash(a,b) into 8 buckets, hash(c) into 1 buckets stored as kudu
+  partition by hash(a,b) partitions 8, hash(c) partitions 1 stored as kudu
 ---- CATCH
 NonRecoverableException: must have at least two hash buckets
 ====
 ---- QUERY
 # Same column in multiple hash based distributions
 create table tab (a int, b int, primary key (a))
-  partition by hash (a) into 3 buckets, hash (a) into 2 buckets stored as kudu
+  partition by hash (a) partitions 3, hash (a) partitions 2 stored as kudu
 ---- CATCH
 NonRecoverableException: hash bucket schema components must not contain 
columns in common
 ====
 ---- QUERY
 # Same column referenced multiple times in the same hash-based distribution
-create table tab (a int primary key) partition by hash (a, a, a) into 3 buckets
+create table tab (a int primary key) partition by hash (a, a, a) partitions 3
 stored as kudu
 ---- CATCH
 NonRecoverableException: hash bucket schema components must not contain 
columns in common
@@ -62,7 +62,7 @@ NonRecoverableException: hash bucket schema components must 
not contain columns
 ---- QUERY
 # Kudu table that uses Impala keywords as table name and column names
 create table `add`(`analytic` int, `function` int, primary key(`analytic`, 
`function`))
-partition by hash (`analytic`) into 4 buckets, range (`function`)
+partition by hash (`analytic`) partitions 4, range (`function`)
 (partition values <= 1, partition 1 < values <= 10, partition 10 < values) 
stored as kudu;
 insert into `add` select id, int_col from functional.alltypestiny;
 select * from `add`

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_delete.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_delete.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_delete.test
index 4dd3377..7094c59 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_delete.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_delete.test
@@ -301,7 +301,7 @@ create table multiple_key_cols
    smallint_col smallint, bool_col boolean null, int_col int null,
    double_col double null, float_col float null,
    primary key (string_col, bigint_col, tinyint_col, smallint_col))
-  PARTITION BY HASH (string_col) INTO 16 BUCKETS STORED AS KUDU
+  PARTITION BY HASH (string_col) PARTITIONS 16 STORED AS KUDU
 ====
 ---- QUERY
 insert into multiple_key_cols values
@@ -343,7 +343,7 @@ STRING,BIGINT,TINYINT,SMALLINT,BOOLEAN,INT,DOUBLE,FLOAT
 # IMPALA-3454: A delete that requires a rewrite may not get the Kudu column 
order correct
 # if the Kudu columns are of different types.
 create table impala_3454 (key_1 tinyint, key_2 bigint, PRIMARY KEY (key_1, 
key_2))
-  PARTITION BY HASH INTO 3 BUCKETS STORED AS KUDU
+  PARTITION BY HASH PARTITIONS 3 STORED AS KUDU
 ---- RESULTS
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_describe.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_describe.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_describe.test
index fade3ce..878c079 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_describe.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_describe.test
@@ -30,7 +30,7 @@ create table describe_test
  c2 int not null default 100 encoding plain_encoding compression snappy,
  c3 int null block_size 8388608,
  primary key (pk1, pk2, pk3))
-partition by hash (pk1) into 3 buckets
+partition by hash (pk1) partitions 3
 stored as kudu;
 describe describe_test;
 ---- LABELS

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
index d67b75f..a61e226 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
@@ -281,7 +281,7 @@ NumRowErrors: 7299
 # IMPALA-2521: clustered insert into table.
 create table impala_2521
 (id bigint primary key, name string, zip int)
-partition by hash into 3 buckets stored as kudu
+partition by hash partitions 3 stored as kudu
 ---- RESULTS
 ====
 ---- QUERY
@@ -317,7 +317,7 @@ BIGINT,STRING,INT
 # Table with all supported types as primary key and distribution columns
 create table allkeytypes (i1 tinyint, i2 smallint, i3 int, i4 bigint, name 
string,
   valf float, vald double, primary key (i1, i2, i3, i4, name)) partition by
-  hash into 3 buckets, range (partition value = (1,1,1,1,'1'),
+  hash partitions 3, range (partition value = (1,1,1,1,'1'),
   partition value = (2,2,2,2,'2'), partition value = (3,3,3,3,'3')) stored as 
kudu
 ---- RESULTS
 ====
@@ -335,8 +335,8 @@ NumRowErrors: 6
 # Table with default values
 create table tbl_with_defaults (a int primary key, b int null default 10,
   c int not null default 100, d int default 1000, e int null, f int not null,
-  g string default 'test', h boolean default true) partition by hash (a) into 3
-  buckets stored as kudu
+  g string default 'test', h boolean default true) partition by hash (a)
+  partitions 3 stored as kudu
 ---- RESULTS
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test
index 496492b..eefbc28 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test
@@ -2,8 +2,8 @@
 ---- QUERY
 -- Test hash partitioning
 create table simple_hash (id int, name string, valf float, vali bigint,
-  primary key (id, name)) partition by hash(id) INTO 4 buckets,
-  hash(name) INTO 2 buckets stored as kudu
+  primary key (id, name)) partition by hash(id) partitions 4,
+  hash(name) partitions 2 stored as kudu
 ---- RESULTS
 ====
 ---- QUERY
@@ -124,7 +124,7 @@ INT,STRING,STRING,STRING,INT
 ---- QUERY
 -- Test hash and range partitioning
 create table simple_hash_range (id int, name string, valf float, vali bigint,
-  primary key (id, name)) partition by hash(id) into 4 buckets, range(id, name)
+  primary key (id, name)) partition by hash(id) partitions 4, range(id, name)
   (partition value = (10, 'martin'), partition value = (20, 'alex')) stored as 
kudu
 ---- RESULTS
 ====
@@ -146,7 +146,7 @@ INT,STRING,STRING,STRING,INT
 ====
 ---- QUERY
 create table simple_hash_range_ctas
-  primary key (id, name) partition by hash(id) into 4 buckets,
+  primary key (id, name) partition by hash(id) partitions 4,
   range(id, name) (partition value = (10, 'casey'), partition value = (20, 
'marcel'))
   stored as kudu
 as select * from simple_hash
@@ -172,7 +172,7 @@ INT,STRING,STRING,STRING,INT
 ---- QUERY
 -- Test hash defaults to all columns
 create table simple_hash_all_columns (id int, name string, valf float, vali 
bigint,
-  primary key (id, name)) partition by hash into 4 buckets stored as kudu
+  primary key (id, name)) partition by hash partitions 4 stored as kudu
 ---- RESULTS
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/testdata/workloads/functional-query/queries/QueryTest/kudu_upsert.test
----------------------------------------------------------------------
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/kudu_upsert.test 
b/testdata/workloads/functional-query/queries/QueryTest/kudu_upsert.test
index b0adb48..8180075 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_upsert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_upsert.test
@@ -392,7 +392,7 @@ create table multiple_key_cols
   (string_col string, bigint_col bigint, tinyint_col tinyint, smallint_col 
smallint,
    bool_col boolean null, int_col int null, double_col double null,
    float_col float null, primary key (string_col, bigint_col, tinyint_col, 
smallint_col))
-  PARTITION BY HASH (string_col) INTO 16 BUCKETS STORED AS KUDU
+  PARTITION BY HASH (string_col) PARTITIONS 16 STORED AS KUDU
 ====
 ---- QUERY
 insert into multiple_key_cols values

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/tests/comparison/db_connection.py
----------------------------------------------------------------------
diff --git a/tests/comparison/db_connection.py 
b/tests/comparison/db_connection.py
index 8e35031..c0f39f0 100644
--- a/tests/comparison/db_connection.py
+++ b/tests/comparison/db_connection.py
@@ -796,7 +796,7 @@ class ImpalaCursor(DbCursor):
         # Impala into Postgres anyway. 3 was chosen for the buckets because our
         # minicluster tends to have 3 tablet servers, but otherwise it's 
arbitrary and
         # provides valid syntax for creating Kudu tables in Impala.
-        sql += '\nDISTRIBUTE BY HASH ({col}) INTO 3 BUCKETS'.format(
+        sql += '\nPARTITION BY HASH ({col}) PARTITIONS 3'.format(
             col=table.primary_key_names[0])
       else:
         raise Exception(

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/tests/comparison/tests/test_cursor.py
----------------------------------------------------------------------
diff --git a/tests/comparison/tests/test_cursor.py 
b/tests/comparison/tests/test_cursor.py
index 972a7f2..38c296c 100644
--- a/tests/comparison/tests/test_cursor.py
+++ b/tests/comparison/tests/test_cursor.py
@@ -76,11 +76,11 @@ def postgresql_cursor():
          'sql': {'two_cols': 'CREATE TABLE two_cols (col1 INT, col2 INT)',
                  'one_pk': 'CREATE TABLE one_pk (col1 INT, col2 INT, '
                            'PRIMARY KEY (col1))\n'
-                           'DISTRIBUTE BY HASH (col1) INTO 3 BUCKETS\n'
+                           'PARTITION BY HASH (col1) PARTITIONS 3\n'
                            'STORED AS KUDU',
                  'three_pks': 'CREATE TABLE three_pks (col1 INT, col2 
CHAR(255), '
                               'col3 INT, col4 INT, PRIMARY KEY (col1, col2, 
col3))\n'
-                              'DISTRIBUTE BY HASH (col1) INTO 3 BUCKETS\n'
+                              'PARTITION BY HASH (col1) PARTITIONS 3\n'
                               'STORED AS KUDU'}},
         {'class': PostgresqlCursor,
          'sql': {'two_cols': 'CREATE TABLE two_cols (col1 INTEGER NULL, '

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f83652c1/tests/query_test/test_cancellation.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_cancellation.py 
b/tests/query_test/test_cancellation.py
index b660c61..3e2e8ba 100644
--- a/tests/query_test/test_cancellation.py
+++ b/tests/query_test/test_cancellation.py
@@ -102,7 +102,7 @@ class TestCancellation(ImpalaTestSuite):
         assert QUERIES.has_key(query) and QUERIES[query] is not None,\
             "PRIMARY KEY for query %s not specified" % query
         query = "create table ctas_cancel primary key (%s) "\
-            "partition by hash into 3 buckets stored as kudu as %s" %\
+            "partition by hash partitions 3 stored as kudu as %s" %\
             (QUERIES[query], query)
       else:
         query = "create table ctas_cancel stored as %sfile as %s" %\

Reply via email to