Repository: hive
Updated Branches:
  refs/heads/master 8ebde0441 -> 9c907769a


HIVE-18453: ACID: Add "CREATE TRANSACTIONAL TABLE" syntax to unify ACID ORC & Parquet support (Igor Kryvenko via Eugene Koifman)
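
The new syntax and its behavior are exercised by the q tests included in
this commit; a minimal sketch follows (session settings copied from the
tests, table names illustrative):

  -- ACID requires concurrency support and the DbTxnManager:
  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

  -- Stored as ORC: full ACID ('transactional'='true',
  -- 'transactional_properties'='default'), so UPDATE/DELETE/MERGE work:
  CREATE TRANSACTIONAL TABLE t (key string, value string)
  CLUSTERED BY (key) INTO 2 BUCKETS STORED AS ORC;

  -- Default text layout: insert-only ACID
  -- ('transactional_properties'='insert_only'):
  CREATE TRANSACTIONAL TABLE t_mm (key string, value string);

  -- Rejected with a SemanticException, per the negative test below:
  -- CREATE TRANSACTIONAL EXTERNAL TABLE t_ext (a int);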


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9c907769
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9c907769
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9c907769

Branch: refs/heads/master
Commit: 9c907769a63a6b23c91fdf0b3f3d0aa6387035dc
Parents: 8ebde04
Author: Igor Kryvenko <kryvenko7i...@gmail.com>
Authored: Tue Sep 18 19:16:38 2018 -0700
Committer: Eugene Koifman <ekoif...@apache.org>
Committed: Tue Sep 18 19:16:38 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   5 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |   2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  46 ++++-
 .../create_external_transactional.q             |   5 +
 .../create_transactional_full_acid.q            |  28 +++
 .../create_transactional_insert_only.q          |  13 ++
 .../create_external_transactional.q.out         |   1 +
 .../create_transactional_full_acid.q.out        | 197 +++++++++++++++++++
 .../create_transactional_insert_only.q.out      |  75 +++++++
 10 files changed, 362 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 3caa51f..8bf9cc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -304,6 +304,7 @@ KW_PRINCIPALS: 'PRINCIPALS';
 KW_COMPACT: 'COMPACT';
 KW_COMPACTIONS: 'COMPACTIONS';
 KW_TRANSACTIONS: 'TRANSACTIONS';
+KW_TRANSACTIONAL: 'TRANSACTIONAL';
 KW_REWRITE : 'REWRITE';
 KW_AUTHORIZATION: 'AUTHORIZATION';
 KW_REOPTIMIZATION: 'REOPTIMIZATION';

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 48f7303..78bc87c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -618,6 +618,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
     xlateMap.put("KW_SCHEDULING_POLICY", "SCHEDULING_POLICY");
     xlateMap.put("KW_PATH", "PATH");
     xlateMap.put("KW_AST", "AST");
+    xlateMap.put("KW_TRANSACTIONAL", "TRANSACTIONAL");
 
     // Operators
     xlateMap.put("DOT", ".");
@@ -1091,7 +1092,7 @@ databaseComment
 createTableStatement
 @init { pushMsg("create table statement", state); }
 @after { popMsg(state); }
-    : KW_CREATE (temp=KW_TEMPORARY)? (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName
+    : KW_CREATE (temp=KW_TEMPORARY)? (trans=KW_TRANSACTIONAL)? (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName
       (  like=KW_LIKE likeName=tableName
          tableRowFormat?
          tableFileFormat?
@@ -1108,7 +1109,7 @@ createTableStatement
          tablePropertiesPrefixed?
          (KW_AS selectStatementWithCTE)?
       )
-    -> ^(TOK_CREATETABLE $name $temp? $ext? ifNotExists?
+    -> ^(TOK_CREATETABLE $name $temp? $trans? $ext? ifNotExists?
          ^(TOK_LIKETABLE $likeName?)
          columnNameTypeOrConstraintList?
          tableComment?

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index f9c97e0..fa033d7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -809,7 +809,7 @@ nonReserved
     | KW_ROLE | KW_ROLES | KW_SCHEMA | KW_SCHEMAS | KW_SECOND | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
     | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED | KW_AST
     | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED
-    | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
+    | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONAL | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
     | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WEEK | KW_WHILE | KW_YEAR
     | KW_WORK
     | KW_TRANSACTION

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 6d7e63e..98448e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -13017,7 +13017,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private Map<String, String> validateAndAddDefaultProperties(
       Map<String, String> tblProp, boolean isExt, StorageFormat storageFormat,
       String qualifiedTableName, List<Order> sortCols, boolean isMaterialization,
-      boolean isTemporaryTable) throws SemanticException {
+      boolean isTemporaryTable, boolean isTransactional) throws SemanticException {
     Map<String, String> retValue;
     if (tblProp == null) {
       retValue = new HashMap<String, String>();
@@ -13055,16 +13055,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID) &&
         HiveConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY) &&
         DbTxnManager.class.getCanonicalName().equals(HiveConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER));
-    if ((makeInsertOnly || makeAcid)
+    if ((makeInsertOnly || makeAcid || isTransactional)
         && !isExt  && !isMaterialization && StringUtils.isBlank(storageFormat.getStorageHandler())
         //don't overwrite user choice if transactional attribute is explicitly set
         && !retValue.containsKey(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)) {
-      if (makeInsertOnly) {
+      if (makeInsertOnly || isTransactional) {
         retValue.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
         retValue.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
             TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY);
       }
-      if (makeAcid) {
+      if (makeAcid || isTransactional) {
         retValue = convertToAcidByDefault(storageFormat, qualifiedTableName, sortCols, retValue);
       }
     }
@@ -13165,10 +13165,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     boolean isExt = false;
     boolean isTemporary = false;
     boolean isMaterialization = false;
+    boolean isTransactional = false;
     ASTNode selectStmt = null;
     final int CREATE_TABLE = 0; // regular CREATE TABLE
     final int CTLT = 1; // CREATE TABLE LIKE ... (CTLT)
     final int CTAS = 2; // CREATE TABLE AS SELECT ... (CTAS)
+    final int ctt = 3; // CREATE TRANSACTIONAL TABLE
     int command_type = CREATE_TABLE;
     List<String> skewedColNames = new ArrayList<String>();
     List<List<String>> skewedValues = new ArrayList<List<String>>();
@@ -13205,6 +13207,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         isTemporary = true;
         isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
         break;
+      case HiveParser.KW_TRANSACTIONAL:
+        isTransactional = true;
+        command_type = ctt;
+        break;
       case HiveParser.TOK_LIKETABLE:
         if (child.getChildCount() > 0) {
           likeTableName = getUnescapedName((ASTNode) child.getChild(0));
@@ -13315,7 +13321,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
     }
 
-    if (command_type == CREATE_TABLE || command_type == CTLT) {
+    if (command_type == CREATE_TABLE || command_type == CTLT || command_type == ctt) {
       queryState.setCommandType(HiveOperation.CREATETABLE);
     } else if (command_type == CTAS) {
       queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
@@ -13377,7 +13383,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             "Partition columns can only declared using their name and types in regular CREATE TABLE statements");
       }
       tblProps = validateAndAddDefaultProperties(
-          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary);
+          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional);
       addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps);
 
       CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols,
@@ -13398,10 +13404,34 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
             crtTblDesc)));
       break;
+    case ctt: // CREATE TRANSACTIONAL TABLE
+      if (isExt) {
+        throw new SemanticException(
+            qualifiedTabName[1] + " cannot be declared transactional because it's an external table");
+      }
+      tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization,
+          isTemporary, isTransactional);
+      addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, false, tblProps);
+
+      CreateTableDesc crtTranTblDesc =
+          new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets,
+              rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim,
+              rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(),
+              storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(),
+              storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys,
+              foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+      crtTranTblDesc.setStoredAsSubDirectories(storedAsDirs);
+      crtTranTblDesc.setNullFormat(rowFormatParams.nullFormat);
+
+      crtTranTblDesc.validate(conf);
+      // outputs is empty, which means this create table happens in the current
+      // database.
+      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc)));
+      break;
 
     case CTLT: // create table like <tbl_name>
       tblProps = validateAndAddDefaultProperties(
-          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary);
+          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional);
       addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps);
 
       if (isTemporary) {
@@ -13485,7 +13515,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
 
       tblProps = validateAndAddDefaultProperties(
-          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary);
+          tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional);
       addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps);
       tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols,
           partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
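
In effect, the new ctt case routes CREATE TRANSACTIONAL TABLE through the
same validateAndAddDefaultProperties() path and forces the transactional
table properties. A rough equivalence sketch, with the property values
taken from the q.out files below (table name illustrative):

  -- CREATE TRANSACTIONAL TABLE ... STORED AS ORC behaves like declaring
  -- the ACID properties by hand:
  CREATE TABLE acid_equiv (key string, value string)
  CLUSTERED BY (key) INTO 2 BUCKETS
  STORED AS ORC
  TBLPROPERTIES ('transactional'='true');

  -- Without ORC, the same statement yields an insert-only table:
  -- TBLPROPERTIES ('transactional'='true',
  --               'transactional_properties'='insert_only')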

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/queries/clientnegative/create_external_transactional.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_external_transactional.q b/ql/src/test/queries/clientnegative/create_external_transactional.q
new file mode 100644
index 0000000..a7f7e79
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_external_transactional.q
@@ -0,0 +1,5 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create transactional external table transactional_external (a int, b varchar(128)) clustered by (b) into 2 buckets;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/queries/clientpositive/create_transactional_full_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/create_transactional_full_acid.q b/ql/src/test/queries/clientpositive/create_transactional_full_acid.q
new file mode 100644
index 0000000..a2b5ba0
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/create_transactional_full_acid.q
@@ -0,0 +1,28 @@
+--! qt:dataset:srcpart
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists target;
+drop table if exists source;
+drop table if exists transactional_table_test;
+
+
+CREATE TRANSACTIONAL TABLE transactional_table_test(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC;
+
+desc formatted transactional_table_test;
+
+insert into table transactional_table_test partition(ds)  select key,value,ds from srcpart;
+
+create transactional table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc;
+create transactional table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc;
+insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2);
+
+-- the intent here is to record the set of ReadEntity and WriteEntity objects for these 2 update statements
+update target set b = 1 where p in (select t.q1 from source t where t.a1=5);
+
+update source set b1 = 1 where p1 in (select t.q from target t where t.p=2);
+delete from target where p in (select t.q1 from source t where t.a1 = 5);
+
+merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1);

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/queries/clientpositive/create_transactional_insert_only.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/create_transactional_insert_only.q b/ql/src/test/queries/clientpositive/create_transactional_insert_only.q
new file mode 100644
index 0000000..d65701b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/create_transactional_insert_only.q
@@ -0,0 +1,13 @@
+--! qt:dataset:srcpart
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists transactional_insert_only_table;
+
+create transactional table transactional_insert_only_table(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS;
+
+desc formatted transactional_insert_only_table;
+
+insert into table transactional_insert_only_table partition(ds)  select key,value,ds from srcpart;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/results/clientnegative/create_external_transactional.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_external_transactional.q.out b/ql/src/test/results/clientnegative/create_external_transactional.q.out
new file mode 100644
index 0000000..6a0f5c1
--- /dev/null
+++ b/ql/src/test/results/clientnegative/create_external_transactional.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException transactional_external cannot be declared transactional because it's an external table

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/results/clientpositive/create_transactional_full_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_transactional_full_acid.q.out b/ql/src/test/results/clientpositive/create_transactional_full_acid.q.out
new file mode 100644
index 0000000..e324d5e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/create_transactional_full_acid.q.out
@@ -0,0 +1,197 @@
+PREHOOK: query: drop table if exists target
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists target
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists source
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists source
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists transactional_table_test
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists transactional_table_test
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TRANSACTIONAL TABLE transactional_table_test(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@transactional_table_test
+POSTHOOK: query: CREATE TRANSACTIONAL TABLE transactional_table_test(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@transactional_table_test
+PREHOOK: query: desc formatted transactional_table_test
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@transactional_table_test
+POSTHOOK: query: desc formatted transactional_table_test
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@transactional_table_test
+# col_name             data_type               comment             
+key                    string                                      
+value                  string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+ds                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            MANAGED_TABLE            
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       bucketing_version       2                   
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       rawDataSize             0                   
+       totalSize               0                   
+       transactional           true                
+       transactional_properties        default             
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.ql.io.orc.OrcSerde        
+InputFormat:           org.apache.hadoop.hive.ql.io.orc.OrcInputFormat  
+OutputFormat:          org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed:            No                       
+Num Buckets:           2                        
+Bucket Columns:        [key]                    
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: insert into table transactional_table_test partition(ds)  select key,value,ds from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@transactional_table_test
+POSTHOOK: query: insert into table transactional_table_test partition(ds)  select key,value,ds from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@transactional_table_test@ds=2008-04-08
+POSTHOOK: Output: default@transactional_table_test@ds=2008-04-09
+POSTHOOK: Lineage: transactional_table_test PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_table_test PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_table_test PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_table_test PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create transactional table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@target
+POSTHOOK: query: create transactional table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@target
+PREHOOK: query: create transactional table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@source
+POSTHOOK: query: create transactional table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@source
+PREHOOK: query: insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@target
+POSTHOOK: query: insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=2/q=2
+POSTHOOK: Lineage: target PARTITION(p=1,q=2).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=2).b SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=3).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=3).b SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=2,q=2).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=2,q=2).b SCRIPT []
+PREHOOK: query: update target set b = 1 where p in (select t.q1 from source t where t.a1=5)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@source
+PREHOOK: Input: default@target
+PREHOOK: Input: default@target@p=1/q=2
+PREHOOK: Input: default@target@p=1/q=3
+PREHOOK: Input: default@target@p=2/q=2
+PREHOOK: Output: default@target@p=1/q=2
+PREHOOK: Output: default@target@p=1/q=3
+PREHOOK: Output: default@target@p=2/q=2
+POSTHOOK: query: update target set b = 1 where p in (select t.q1 from source t where t.a1=5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@source
+POSTHOOK: Input: default@target
+POSTHOOK: Input: default@target@p=1/q=2
+POSTHOOK: Input: default@target@p=1/q=3
+POSTHOOK: Input: default@target@p=2/q=2
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=2/q=2
+PREHOOK: query: update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@source
+PREHOOK: Input: default@target
+PREHOOK: Input: default@target@p=2/q=2
+PREHOOK: Output: default@source
+POSTHOOK: query: update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@source
+POSTHOOK: Input: default@target
+POSTHOOK: Input: default@target@p=2/q=2
+POSTHOOK: Output: default@source
+PREHOOK: query: delete from target where p in (select t.q1 from source t where t.a1 = 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@source
+PREHOOK: Input: default@target
+PREHOOK: Input: default@target@p=1/q=2
+PREHOOK: Input: default@target@p=1/q=3
+PREHOOK: Input: default@target@p=2/q=2
+PREHOOK: Output: default@target@p=1/q=2
+PREHOOK: Output: default@target@p=1/q=3
+PREHOOK: Output: default@target@p=2/q=2
+POSTHOOK: query: delete from target where p in (select t.q1 from source t where t.a1 = 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@source
+POSTHOOK: Input: default@target
+POSTHOOK: Input: default@target@p=1/q=2
+POSTHOOK: Input: default@target@p=1/q=3
+POSTHOOK: Input: default@target@p=2/q=2
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=2/q=2
+PREHOOK: query: merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@source
+PREHOOK: Input: default@target
+PREHOOK: Input: default@target@p=1/q=2
+PREHOOK: Input: default@target@p=1/q=3
+PREHOOK: Input: default@target@p=2/q=2
+PREHOOK: Output: default@merge_tmp_table
+PREHOOK: Output: default@target
+PREHOOK: Output: default@target@p=1/q=2
+PREHOOK: Output: default@target@p=1/q=2
+PREHOOK: Output: default@target@p=1/q=3
+PREHOOK: Output: default@target@p=1/q=3
+PREHOOK: Output: default@target@p=2/q=2
+PREHOOK: Output: default@target@p=2/q=2
+POSTHOOK: query: merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@source
+POSTHOOK: Input: default@target
+POSTHOOK: Input: default@target@p=1/q=2
+POSTHOOK: Input: default@target@p=1/q=3
+POSTHOOK: Input: default@target@p=2/q=2
+POSTHOOK: Output: default@merge_tmp_table
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=2/q=2
+POSTHOOK: Output: default@target@p=2/q=2
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(target)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (target)t.FieldSchema(name:p, type:int, comment:null), (target)t.FieldSchema(name:q, type:int, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/9c907769/ql/src/test/results/clientpositive/create_transactional_insert_only.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_transactional_insert_only.q.out b/ql/src/test/results/clientpositive/create_transactional_insert_only.q.out
new file mode 100644
index 0000000..1c6e8f5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/create_transactional_insert_only.q.out
@@ -0,0 +1,75 @@
+PREHOOK: query: drop table if exists transactional_insert_only_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists transactional_insert_only_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create transactional table transactional_insert_only_table(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@transactional_insert_only_table
+POSTHOOK: query: create transactional table transactional_insert_only_table(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@transactional_insert_only_table
+PREHOOK: query: desc formatted transactional_insert_only_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@transactional_insert_only_table
+POSTHOOK: query: desc formatted transactional_insert_only_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@transactional_insert_only_table
+# col_name             data_type               comment             
+key                    string                                      
+value                  string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+ds                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            MANAGED_TABLE            
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       bucketing_version       2                   
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       rawDataSize             0                   
+       totalSize               0                   
+       transactional           true                
+       transactional_properties        insert_only         
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat         
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No                       
+Num Buckets:           2                        
+Bucket Columns:        [key]                    
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: insert into table transactional_insert_only_table partition(ds)  select key,value,ds from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@transactional_insert_only_table
+POSTHOOK: query: insert into table transactional_insert_only_table partition(ds)  select key,value,ds from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@transactional_insert_only_table@ds=2008-04-08
+POSTHOOK: Output: default@transactional_insert_only_table@ds=2008-04-09
+POSTHOOK: Lineage: transactional_insert_only_table PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_insert_only_table PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_insert_only_table PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: transactional_insert_only_table PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
