Repository: hive
Updated Branches:
  refs/heads/master dfb868d1a -> e44efe059


HIVE-13947: HoS prints wrong number for hash table size in map join scenario (Aihua Xu, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e44efe05
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e44efe05
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e44efe05

Branch: refs/heads/master
Commit: e44efe05910a585062dd84e86c85f82e61e2b027
Parents: dfb868d
Author: Aihua Xu <aihu...@apache.org>
Authored: Tue Nov 1 10:51:26 2016 -0400
Committer: Aihua Xu <aihu...@apache.org>
Committed: Mon Nov 7 09:16:56 2016 -0500

----------------------------------------------------------------------
 .../ql/exec/SparkHashTableSinkOperator.java     | 27 +++++++++++---------
 1 file changed, 15 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e44efe05/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
index 3b358ee..c3b1d0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
@@ -156,14 +156,23 @@ public class SparkHashTableSinkOperator
     replication = (short) Math.max(minReplication, numOfPartitions);
     htsOperator.console.printInfo(Utilities.now() + "\tDump the side-table for 
tag: " + tag
       + " with group count: " + tableContainer.size() + " into file: " + path);
-    // get the hashtable file and path
-    OutputStream os = null;
-    ObjectOutputStream out = null;
     try {
-      os = fs.create(path, replication);
-      out = new ObjectOutputStream(new BufferedOutputStream(os, 4096));
+      // get the hashtable file and path
+      OutputStream os = null;
+      ObjectOutputStream out = null;
       MapJoinTableContainerSerDe mapJoinTableSerde = htsOperator.mapJoinTableSerdes[tag];
-      mapJoinTableSerde.persist(out, tableContainer);
+      try {
+        os = fs.create(path, replication);
+        out = new ObjectOutputStream(new BufferedOutputStream(os, 4096));
+        mapJoinTableSerde.persist(out, tableContainer);
+      } finally {
+        if (out != null) {
+          out.close();
+        } else if (os != null) {
+          os.close();
+        }
+      }
+
       FileStatus status = fs.getFileStatus(path);
       htsOperator.console.printInfo(Utilities.now() + "\tUploaded 1 File to: " 
+ path
         + " (" + status.getLen() + " bytes)");
@@ -176,12 +185,6 @@ public class SparkHashTableSinkOperator
           + tag + ", file " + path, ex);
       }
       throw e;
-    } finally {
-      if (out != null) {
-        out.close();
-      } else if (os != null) {
-        os.close();
-      }
     }
     tableContainer.clear();
   }
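
The wrong number came from the old code calling fs.getFileStatus(path) before the finally block
had closed the ObjectOutputStream, so the logged "Uploaded 1 File ... bytes" length could miss
data that had not yet been flushed out of the 4 KB BufferedOutputStream. The patch closes the
streams in an inner try/finally and only then reads the file status. Below is a minimal,
self-contained sketch of that ordering, using java.nio on the local filesystem as a stand-in for
the Hadoop FileSystem calls in the patch (the class name and the serialized payload are made up
for illustration):

import java.io.BufferedOutputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;

public class FlushBeforeStat {
  public static void main(String[] args) throws Exception {
    Path path = Files.createTempFile("side-table", ".hashtable");

    OutputStream os = Files.newOutputStream(path);
    ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(os, 4096));
    try {
      // Stand-in for mapJoinTableSerde.persist(out, tableContainer): a small
      // serialized payload that fits entirely inside the 4 KB buffer.
      out.writeObject(new long[] {1, 2, 3});

      // Size taken while the buffer is still unflushed -- what the old code
      // effectively reported; it can be far too small (here, typically 0).
      System.out.println("size before close: " + Files.size(path) + " bytes");
    } finally {
      out.close(); // flushes the buffer and closes the wrapped stream
    }

    // Size taken after the stream is closed -- the fixed code queries the
    // file status only at this point, so the logged size is accurate.
    System.out.println("size after close:  " + Files.size(path) + " bytes");

    Files.delete(path);
  }
}

Note that closing out also closes the wrapped os; that is why the patch's finally block only
closes os directly when out is still null, i.e. when constructing the ObjectOutputStream itself
failed.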
