Repository: hive
Updated Branches:
  refs/heads/master c2e335fc0 -> 1223031b9


HIVE-18516 : load data should rename files consistent with insert statements 
for ACID Tables (Deepak Jaiswal, reviewed by Eugene Koifman and Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1223031b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1223031b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1223031b

Branch: refs/heads/master
Commit: 1223031b9afdb7e22f9da09f6c8bb47037e447d0
Parents: c2e335f
Author: djaiswal <djais...@apache.org>
Authored: Fri Feb 2 13:25:53 2018 -0800
Committer: djaiswal <djais...@apache.org>
Committed: Fri Feb 2 13:27:08 2018 -0800

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 75 +++++++++++++-------
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |  8 ---
 .../apache/hadoop/hive/ql/TestTxnLoadData.java  | 12 ++--
 .../hive/ql/metadata/TestHiveCopyFiles.java     | 12 ++--
 .../clientnegative/load_data_into_acid.q        | 20 ------
 .../test/queries/clientpositive/smb_mapjoin_7.q |  4 +-
 .../clientnegative/load_data_into_acid.q.out    | 33 ---------
 .../clientpositive/beeline/smb_mapjoin_7.q.out  |  8 +--
 .../results/clientpositive/smb_mapjoin_7.q.out  |  8 +--
 .../clientpositive/spark/smb_mapjoin_7.q.out    |  8 +--
 11 files changed, 75 insertions(+), 114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index d86ff58..70d0749 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -570,6 +570,7 @@ minillaplocal.query.files=\
   list_bucket_dml_10.q,\
   llap_partitioned.q,\
   llap_vector_nohybridgrace.q,\
+  load_data_acid_rename.q,\
   load_dyn_part5.q,\
   lvj_mapjoin.q,\
   materialized_view_create_rewrite_dummy.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3b97dac..b1e05df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -31,18 +31,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 import java.util.Map.Entry;
-import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -1881,11 +1871,12 @@ public class Hive {
           // base_x.  (there is Insert Overwrite and Load Data Overwrite)
           boolean isAutoPurge = 
"true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
           replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, 
getConf(),
-              isSrcLocal, isAutoPurge, newFiles, filter, 
isMmTableWrite?true:false, !tbl.isTemporary());
+              isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite, 
!tbl.isTemporary());
         } else {
           FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
           copyFiles(conf, loadPath, destPath, fs, isSrcLocal, 
isAcidIUDoperation,
-            (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles);
+            (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles, 
tbl.getNumBuckets() > 0,
+                  isFullAcidTable);
         }
       }
       perfLogger.PerfLogEnd("MoveTask", "FileMoves");
@@ -2432,7 +2423,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
         try {
           FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
           copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, 
isAcidIUDoperation,
-            loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles);
+            loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles,
+                  tbl.getNumBuckets() > 0 ? true : false, isFullAcidTable);
         } catch (IOException e) {
           throw new HiveException("addFiles: filesystem error in check phase", 
e);
         }
@@ -3302,8 +3294,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
   }
 
   private static void copyFiles(final HiveConf conf, final FileSystem destFs,
-            FileStatus[] srcs, final FileSystem srcFs, final Path destf, final 
boolean isSrcLocal,
-            boolean isOverwrite, final List<Path> newFiles) throws 
HiveException {
+            FileStatus[] srcs, final FileSystem srcFs, final Path destf,
+            final boolean isSrcLocal, boolean isOverwrite,
+            final List<Path> newFiles, boolean acidRename) throws 
HiveException {
 
     final HdfsUtils.HadoopFileStatus fullDestStatus;
     try {
@@ -3319,6 +3312,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
     final ExecutorService pool = 
conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ?
         
Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname,
 25),
         new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) 
: null;
+    // For ACID non-bucketed case, the filenames have to be in the format 
consistent with INSERT/UPDATE/DELETE Ops,
+    // i.e., like 000000_0, 000001_0_copy_1, 000002_0.gz etc.
+    // The extension is only maintained for files which are compressed.
+    int taskId = 0;
+    // Sort the files
+    Arrays.sort(srcs);
     for (FileStatus src : srcs) {
       FileStatus[] files;
       if (src.isDirectory()) {
@@ -3333,6 +3332,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
 
       final SessionState parentSession = SessionState.get();
+      // Sort the files
+      Arrays.sort(files);
       for (final FileStatus srcFile : files) {
         final Path srcP = srcFile.getPath();
         final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs);
@@ -3346,7 +3347,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
         // copy from source to destination, we will inherit the destination's 
parent group ownership.
         if (null == pool) {
           try {
-            Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, 
isSrcLocal, isOverwrite, isRenameAllowed);
+            Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, 
isSrcLocal, isOverwrite, isRenameAllowed,
+                    acidRename ? taskId++ : -1);
 
             if (null != newFiles) {
               newFiles.add(destPath);
@@ -3355,6 +3357,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
             throw getHiveException(e, msg, "Failed to move: {}");
           }
         } else {
+          // A future only captures final or effectively final values. Make a final 
copy of taskId
+          final int finalTaskId = acidRename ? taskId++ : -1;
           futures.add(pool.submit(new Callable<ObjectPair<Path, Path>>() {
             @Override
             public ObjectPair<Path, Path> call() throws HiveException {
@@ -3362,7 +3366,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
               try {
                 Path destPath =
-                    mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, 
isOverwrite, isRenameAllowed);
+                    mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, 
isOverwrite, isRenameAllowed, finalTaskId);
 
                 if (null != newFiles) {
                   newFiles.add(destPath);
@@ -3432,6 +3436,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
     return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path);
   }
 
+  private static String getPathName(int taskId) {
+    return Utilities.replaceTaskId("000000", taskId) + "_0";
+  }
+
   /**
    * <p>
    *   Moves a file from one {@link Path} to another. If {@code 
isRenameAllowed} is true then the
@@ -3459,15 +3467,22 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @throws IOException if there was an issue moving the file
    */
   private static Path mvFile(HiveConf conf, FileSystem sourceFs, Path 
sourcePath, FileSystem destFs, Path destDirPath,
-                             boolean isSrcLocal, boolean isOverwrite, boolean 
isRenameAllowed) throws IOException {
+                             boolean isSrcLocal, boolean isOverwrite, boolean 
isRenameAllowed,
+                             int taskId) throws IOException {
 
     // Strip off the file type, if any so we don't make:
     // 000000_0.gz -> 000000_0.gz_copy_1
     final String fullname = sourcePath.getName();
-    final String name = FilenameUtils.getBaseName(sourcePath.getName());
+    final String name;
+    if (taskId == -1) { // non-acid
+      name = FilenameUtils.getBaseName(sourcePath.getName());
+    } else { // acid
+      name = getPathName(taskId);
+    }
     final String type = FilenameUtils.getExtension(sourcePath.getName());
 
-    Path destFilePath = new Path(destDirPath, fullname);
+    // In case of ACID, the file is ORC so the extension is not relevant and 
should not be inherited.
+    Path destFilePath = new Path(destDirPath, taskId == -1 ? fullname : name);
 
     /*
     * The below loop may perform bad when the destination file already exists 
and it has too many _copy_
@@ -3482,7 +3497,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
         destFs.delete(destFilePath, false);
         break;
       }
-      destFilePath =  new Path(destDirPath, name + (Utilities.COPY_KEYWORD + 
counter) + (!type.isEmpty() ? "." + type : ""));
+      destFilePath =  new Path(destDirPath, name + (Utilities.COPY_KEYWORD + 
counter) +
+              ((taskId == -1 && !type.isEmpty()) ? "." + type : ""));
     }
 
     if (isRenameAllowed) {
@@ -3769,15 +3785,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param destf directory to move files into
    * @param fs Filesystem
    * @param isSrcLocal true if source is on local file system
-   * @param isAcid true if this is an ACID based write
+   * @param isAcidIUD true if this is an ACID based Insert/Update/Delete
    * @param isOverwrite if true, then overwrite if destination file exist, 
else add a duplicate copy
    * @param newFiles if this is non-null, a list of files that were created as 
a result of this
    *                 move will be returned.
    * @throws HiveException
    */
   static protected void copyFiles(HiveConf conf, Path srcf, Path destf, 
FileSystem fs,
-                                  boolean isSrcLocal, boolean isAcid,
-                                  boolean isOverwrite, List<Path> newFiles) 
throws HiveException {
+                                  boolean isSrcLocal, boolean isAcidIUD,
+                                  boolean isOverwrite, List<Path> newFiles, 
boolean isBucketed,
+                                  boolean isFullAcidTable) throws 
HiveException {
     try {
       // create the destination if it does not exist
       if (!fs.exists(destf)) {
@@ -3806,10 +3823,14 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
     // If we're moving files around for an ACID write then the rules and paths 
are all different.
     // You can blame this on Owen.
-    if (isAcid) {
+    if (isAcidIUD) {
       moveAcidFiles(srcFs, srcs, destf, newFiles);
     } else {
-      copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, isOverwrite, 
newFiles);
+      // For ACID non-bucketed case, the filenames have to be in the format 
consistent with INSERT/UPDATE/DELETE Ops,
+      // i.e., like 000000_0, 000001_0_copy_1, 000002_0.gz etc.
+      // The extension is only maintained for files which are compressed.
+      copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, isOverwrite,
+              newFiles, isFullAcidTable && !isBucketed);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 5868d4d..54f5bab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -159,14 +159,6 @@ public class LoadSemanticAnalyzer extends 
BaseSemanticAnalyzer {
           throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
               "source contains directory: " + oneSrc.getPath().toString()));
         }
-        if(AcidUtils.isAcidTable(table)) {
-          if(!AcidUtils.originalBucketFilter.accept(oneSrc.getPath())) {
-            //acid files (e.g. bucket_0000) have ROW_ID embedded in them and 
so can't be simply
-            //copied to a table so only allow non-acid files for now
-            throw new 
SemanticException(ErrorMsg.ACID_LOAD_DATA_INVALID_FILE_NAME,
-              oneSrc.getPath().getName(), table.getFullyQualifiedName());
-          }
-        }
       }
     } catch (IOException e) {
       // Has to use full name to make sure it does not conflict with

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
index a9cba45..3a3272f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
@@ -264,10 +264,10 @@ public class TestTxnLoadData extends 
TxnCommandsBaseForTests {
       //from Load Data into acid converted table
       {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
"t/delta_0000024_0000024_0000/000000_0"},
       {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t3\t4", 
"t/delta_0000024_0000024_0000/000000_0"},
-      {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":2}\t2\t2", 
"t/delta_0000024_0000024_0000/000000_0_copy_1"},
-      {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":3}\t3\t3", 
"t/delta_0000024_0000024_0000/000000_0_copy_1"},
-      {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":4}\t4\t4", 
"t/delta_0000024_0000024_0000/000000_0_copy_2"},
-      {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":5}\t5\t5", 
"t/delta_0000024_0000024_0000/000000_0_copy_2"},
+      {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":0}\t2\t2", 
"t/delta_0000024_0000024_0000/000001_0"},
+      {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":1}\t3\t3", 
"t/delta_0000024_0000024_0000/000001_0"},
+      {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":0}\t4\t4", 
"t/delta_0000024_0000024_0000/000002_0"},
+      {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":1}\t5\t5", 
"t/delta_0000024_0000024_0000/000002_0"},
     };
     checkResult(expected, testQuery, isVectorized, "load data inpath");
 
@@ -281,7 +281,7 @@ public class TestTxnLoadData extends 
TxnCommandsBaseForTests {
     String[][] expected2 = new String[][] {
       {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", 
"t/base_0000030/000000_0"},
       {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", 
"t/base_0000030/000000_0"},
-      {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":2}\t8\t8", 
"t/base_0000030/000000_0_copy_1"}
+      {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", 
"t/base_0000030/000001_0"}
     };
     checkResult(expected2, testQuery, isVectorized, "load data inpath 
overwrite");
 
@@ -293,7 +293,7 @@ public class TestTxnLoadData extends 
TxnCommandsBaseForTests {
     String[][] expected3 = new String[][] {
       {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", 
"t/base_0000033/bucket_00000"},
       {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", 
"t/base_0000033/bucket_00000"},
-      {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":2}\t8\t8", 
"t/base_0000033/bucket_00000"},
+      {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", 
"t/base_0000033/bucket_00001"},
       {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t9\t9", 
"t/base_0000033/bucket_00000"}
 
     };

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java
----------------------------------------------------------------------
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java 
b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java
index c6a4a89..a20a2ae 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java
@@ -83,7 +83,7 @@ public class TestHiveCopyFiles {
     FileSystem targetFs = targetPath.getFileSystem(hiveConf);
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false,null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false,null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);
@@ -107,7 +107,7 @@ public class TestHiveCopyFiles {
     FileSystem targetFs = targetPath.getFileSystem(hiveConf);
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false, null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false, null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);
@@ -127,7 +127,7 @@ public class TestHiveCopyFiles {
     sourceFolder.newFile("000001_0.gz");
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false, null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, 
isSourceLocal, NO_ACID, false, null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);
@@ -158,7 +158,7 @@ public class TestHiveCopyFiles {
     Mockito.when(spyTargetFs.getUri()).thenReturn(URI.create("hdfs://" + 
targetPath.toUri().getPath()));
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);
@@ -185,7 +185,7 @@ public class TestHiveCopyFiles {
     Mockito.when(spyTargetFs.getUri()).thenReturn(URI.create("hdfs://" + 
targetPath.toUri().getPath()));
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);
@@ -205,7 +205,7 @@ public class TestHiveCopyFiles {
     sourceFolder.newFile("000001_0.gz");
 
     try {
-      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null);
+      Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, 
isSourceLocal, NO_ACID, false, null, false, false);
     } catch (HiveException e) {
       e.printStackTrace();
       assertTrue("Hive.copyFiles() threw an unexpected exception.", false);

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/queries/clientnegative/load_data_into_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/load_data_into_acid.q 
b/ql/src/test/queries/clientnegative/load_data_into_acid.q
deleted file mode 100644
index 2ac5b56..0000000
--- a/ql/src/test/queries/clientnegative/load_data_into_acid.q
+++ /dev/null
@@ -1,20 +0,0 @@
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-create table acid_ivot(
-    ctinyint TINYINT,
-    csmallint SMALLINT,
-    cint INT,
-    cbigint BIGINT,
-    cfloat FLOAT,
-    cdouble DOUBLE,
-    cstring1 STRING,
-    cstring2 STRING,
-    ctimestamp1 TIMESTAMP,
-    ctimestamp2 TIMESTAMP,
-    cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true');
-
-LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot;
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q 
b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
index 4a6afb0..fed931c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
@@ -16,8 +16,8 @@ create table smb_join_results(k1 int, v1 string, k2 int, v2 
string);
 create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 
string);
 create table normal_join_results(k1 int, v1 string, k2 int, v2 string);
 
-load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1;
-load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1;
+load data local inpath '../../data/files/empty/000000_0' into table 
smb_bucket4_1;
+load data local inpath '../../data/files/empty/000001_0' into table 
smb_bucket4_1;
 
 insert overwrite table smb_bucket4_2
 select * from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/results/clientnegative/load_data_into_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/load_data_into_acid.q.out 
b/ql/src/test/results/clientnegative/load_data_into_acid.q.out
deleted file mode 100644
index 46b5cdd..0000000
--- a/ql/src/test/results/clientnegative/load_data_into_acid.q.out
+++ /dev/null
@@ -1,33 +0,0 @@
-PREHOOK: query: create table acid_ivot(
-    ctinyint TINYINT,
-    csmallint SMALLINT,
-    cint INT,
-    cbigint BIGINT,
-    cfloat FLOAT,
-    cdouble DOUBLE,
-    cstring1 STRING,
-    cstring2 STRING,
-    ctimestamp1 TIMESTAMP,
-    ctimestamp2 TIMESTAMP,
-    cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_ivot
-POSTHOOK: query: create table acid_ivot(
-    ctinyint TINYINT,
-    csmallint SMALLINT,
-    cint INT,
-    cbigint BIGINT,
-    cfloat FLOAT,
-    cdouble DOUBLE,
-    cstring1 STRING,
-    cstring2 STRING,
-    ctimestamp1 TIMESTAMP,
-    ctimestamp2 TIMESTAMP,
-    cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_ivot
-FAILED: SemanticException [Error 30023]: alltypesorc file name is not valid in 
Load Data into Acid table default.acid_ivot.  Examples of valid names are: 
00000_0, 00000_0_copy_1

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
index 7a6f8c5..4b1313d 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
@@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, 
v1 string, k2 int, v2
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@normal_join_results
-PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1
-PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out 
b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
index b71c5b8..83033b0 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
@@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, 
v1 string, k2 int, v2
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@normal_join_results
-PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1
-PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out 
b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
index ac49c02..610abab 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
@@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, 
v1 string, k2 int, v2
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@normal_join_results
-PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1
-PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
 PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into 
table smb_bucket4_1
+POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into 
table smb_bucket4_1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket4_1

Reply via email to