hive git commit: HIVE-20731: keystore file in JdbcStorageHandler should be authorized (Add missing file)

2018-10-16 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master e39a19801 -> fa97c67f4


HIVE-20731: keystore file in JdbcStorageHandler should be authorized (Add missing file)

Signed-off-by: Thejas M Nair 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fa97c67f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fa97c67f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fa97c67f

Branch: refs/heads/master
Commit: fa97c67f4bdcb7f1ea090cccf28571a069b36b8e
Parents: e39a198
Author: Daniel Dai 
Authored: Tue Oct 16 16:48:47 2018 -0700
Committer: Daniel Dai 
Committed: Tue Oct 16 16:49:14 2018 -0700

--
 data/files/test.jceks | Bin 0 -> 988 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fa97c67f/data/files/test.jceks
--
diff --git a/data/files/test.jceks b/data/files/test.jceks
new file mode 100644
index 000..bfeefe1
Binary files /dev/null and b/data/files/test.jceks differ



[2/2] hive git commit: HIVE-20307 : Add support for filterspec to the getPartitions with projection API (Vihang Karajgaonkar, reviewed by Andrew Sherman)

2018-10-16 Thread vihangk1
HIVE-20307 : Add support for filterspec to the getPartitions with projection API (Vihang Karajgaonkar, reviewed by Andrew Sherman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e39a1980
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e39a1980
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e39a1980

Branch: refs/heads/master
Commit: e39a19801abf7b2a711883945e8c7a9e3551a09d
Parents: dc8d8e1
Author: Vihang Karajgaonkar 
Authored: Wed Aug 22 20:29:23 2018 -0700
Committer: Vihang Karajgaonkar 
Committed: Tue Oct 16 14:15:45 2018 -0700

--
 .../listener/DummyRawStoreFailEvent.java|  12 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |   2 +-
 .../hive/metastore/utils/MetaStoreUtils.java|   6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|  20 +-
 .../hive/metastore/MetaStoreDirectSql.java  |  53 +-
 .../hadoop/hive/metastore/ObjectStore.java  | 173 +++-
 .../apache/hadoop/hive/metastore/RawStore.java  |  51 +-
 .../hive/metastore/cache/CachedStore.java   |  11 +-
 .../DummyRawStoreControlledCommit.java  |  22 +-
 .../DummyRawStoreForJdoConnection.java  |   6 +-
 .../TestGetPartitionsUsingProjection.java   | 700 --
 ...PartitionsUsingProjectionAndFilterSpecs.java | 904 +++
 12 files changed, 1146 insertions(+), 814 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e39a1980/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
--
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index d59d5d8..c3e1e8e 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -19,6 +19,8 @@
 package org.apache.hive.hcatalog.listener;
 
 import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
 import org.apache.hadoop.hive.metastore.api.ISchemaName;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 import org.apache.hadoop.hive.metastore.api.Catalog;
@@ -413,12 +415,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public List getPartitionSpecsByFilterAndProjection(String catalog,
-      String dbName, String tblName,
-      List fieldList, String includeParamKeyPattern,
-      String excludeParamKeyPattern) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionSpecsByFilterAndProjection(catalog, dbName, tblName, fieldList,
-        includeParamKeyPattern, excludeParamKeyPattern);
+  public List getPartitionSpecsByFilterAndProjection(Table table,
+      GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec);
   }
 
   @Override
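
The mail archive strips anything inside angle brackets, which is why generic types like List appear bare in this hunk. For orientation, a hedged reconstruction of the reshaped RawStore method, with the type parameters filled in as assumptions rather than quotes from the patch:

    // Hedged sketch: the old string-based arguments collapse into the Table
    // plus two thrift spec objects; List<Partition> as the return type is an
    // assumption based on the bare "List" shown above.
    List<Partition> getPartitionSpecsByFilterAndProjection(
        Table table,
        GetPartitionsProjectionSpec projectionSpec,
        GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException;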

http://git-wip-us.apache.org/repos/asf/hive/blob/e39a1980/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index a2b57fb..dd23d7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -948,7 +948,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
*
*/
   private List getPartitions(List partialPartVals) throws MetaException {
-    String partNameMatcher = MetaStoreUtils.makePartNameMatcher(tTable, partialPartVals);
+    String partNameMatcher = MetaStoreUtils.makePartNameMatcher(tTable, partialPartVals, ".*");
     List matchedPartitions = new ArrayList<>();
     for(String key : parts.keySet()) {
       if(key.matches(partNameMatcher)) {
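
A hedged sketch of what the new third argument does at this call site: it supplies the regex used for any partition values the caller leaves unspecified. The table layout and the pattern shown in the comment are illustrative assumptions, not output quoted from MetaStoreUtils:

    // For a table partitioned by (year, month), a partial spec of ["2018"]
    // should produce a matcher along the lines of "year=2018/month=.*";
    // the exact escaping is an assumption.
    List<String> partialPartVals = Arrays.asList("2018");
    String matcher = MetaStoreUtils.makePartNameMatcher(tTable, partialPartVals, ".*");
    boolean hit = "year=2018/month=10".matches(matcher);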

http://git-wip-us.apache.org/repos/asf/hive/blob/e39a1980/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
---

[1/2] hive git commit: HIVE-20307 : Add support for filterspec to the getPartitions with projection API (Vihang Karajgaonkar, reviewed by Andrew Sherman)

2018-10-16 Thread vihangk1
Repository: hive
Updated Branches:
  refs/heads/master dc8d8e134 -> e39a19801


http://git-wip-us.apache.org/repos/asf/hive/blob/e39a1980/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java
--
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java
new file mode 100644
index 000..bc43f3d
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java
@@ -0,0 +1,904 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.commons.beanutils.PropertyUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionFilterMode;
+import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
+import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.thrift.TException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.hadoop.hive.metastore.ColumnType.SERIALIZATION_FORMAT;
+
+/**
+ * Tests for the getPartitionsWithSpecs metastore API. This test creates some partitions and makes
+ * sure that getPartitionsWithSpecs returns results comparable with the get_partitions API when
+ * various combinations of projection spec are set. It also checks the JDO code path in addition to
+ * the directSQL code path.
+ */
+@Category(MetastoreCheckinTest.class)
+public class TestGetPartitionsUsingProjectionAndFilterSpecs {
+  private static final Logger LOG = LoggerFactory.getLogger(TestGetPartitionsUsingProjectionAndFilterSpecs.class);
+  protected static Configuration conf = MetastoreConf.newMetastoreConf();
+  private static int port;
+  private static final String dbName = "test_projection_db";
+  private static final String tblName = "test_projection_table";
+  private List origPartitions;
+  private Table tbl;
+  private static final String EXCLUDE_KEY_PREFIX = "exclude";
+  private HiveMetaStoreClient client;
+
+  @BeforeClass
+  public static void startMetaStoreServer() throws Exception {
+conf.set("hive.in.test", "true");
+MetaStoreTestUtil
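
A minimal sketch of the getPartitionsWithSpecs call these tests exercise, built only from the thrift classes imported above; the setter and getter names follow thrift's generated-bean conventions and are assumptions, not quotes from the test:

    // Project two partition fields and filter by partition value; "client"
    // is the HiveMetaStoreClient field declared above.
    GetPartitionsProjectionSpec projection = new GetPartitionsProjectionSpec();
    projection.setFieldList(Arrays.asList("sd.location", "createTime"));

    GetPartitionsFilterSpec filter = new GetPartitionsFilterSpec();
    filter.setFilterMode(PartitionFilterMode.BY_VALUES);
    filter.setFilters(Arrays.asList("2018"));

    GetPartitionsRequest request = new GetPartitionsRequest();
    request.setDbName(dbName);
    request.setTblName(tblName);
    request.setProjectionSpec(projection);
    request.setFilterSpec(filter);

    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    List<PartitionSpec> partitionSpecs = response.getPartitionSpec();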

hive git commit: HIVE-20731: keystore file in JdbcStorageHandler should be authorized (Daniel Dai, reviewed by Thejas Nair)

2018-10-16 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master d7be4b9f2 -> dc8d8e134


HIVE-20731: keystore file in JdbcStorageHandler should be authorized (Daniel Dai, reviewed by Thejas Nair)

Signed-off-by: Thejas M Nair 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc8d8e13
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc8d8e13
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc8d8e13

Branch: refs/heads/master
Commit: dc8d8e134c2cc752e89d1b6ccf3097c8e43aa88a
Parents: d7be4b9
Author: Daniel Dai 
Authored: Tue Oct 16 13:52:05 2018 -0700
Committer: Daniel Dai 
Committed: Tue Oct 16 13:52:13 2018 -0700

--
 .../hive/ql/parse/BaseSemanticAnalyzer.java | 27 
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |  1 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  1 +
 .../authorization_jdbc_keystore.q   | 28 +
 .../queries/clientpositive/external_jdbc_auth.q | 26 
 .../authorization_jdbc_keystore.q.out   |  1 +
 .../llap/external_jdbc_auth.q.out   | 66 +++-
 7 files changed, 148 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dc8d8e13/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 1df5c74..c9df668 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.type.Date;
+import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -98,6 +99,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -2275,4 +2279,27 @@ public abstract class BaseSemanticAnalyzer {
   public WriteEntity getAcidAnalyzeTable() {
     return null;
   }
+
+  public void addPropertyReadEntry(Map tblProps, Set inputs) throws SemanticException {
+    if (tblProps.containsKey(Constants.JDBC_KEYSTORE)) {
+      try {
+        String keystore = tblProps.get(Constants.JDBC_KEYSTORE);
+        Configuration conf = new Configuration();
+        conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, keystore);
+        boolean found = false;
+        for (CredentialProvider provider : CredentialProviderFactory.getProviders(conf))
+          if (provider instanceof AbstractJavaKeyStoreProvider) {
+            Path path = ((AbstractJavaKeyStoreProvider) provider).getPath();
+            inputs.add(toReadEntity(path));
+            found = true;
+          }
+        if (!found) {
+          throw new SemanticException("Cannot recognize keystore " + keystore + ", only JavaKeyStoreProvider is " +
+              "supported");
+        }
+      } catch (IOException e) {
+        throw new SemanticException(e);
+      }
+    }
+  }
 }
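
The generic types on addPropertyReadEntry are stripped by the archive; Map<String, String> and Set<ReadEntity> are the natural reading but are assumptions here. A standalone, hedged sketch of the check the method performs, using only the Hadoop credential-provider API imported above:

    // Resolve the jceks URI from the table property to a filesystem path so
    // the authorizer can demand read access on the keystore file itself.
    // The property value format ("jceks://file/...") is an assumption based
    // on the q-file names in this patch.
    Configuration conf = new Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "jceks://file/tmp/test.jceks");
    for (CredentialProvider provider : CredentialProviderFactory.getProviders(conf)) {
      if (provider instanceof AbstractJavaKeyStoreProvider) {
        // this Path becomes a ReadEntity, so setting the keystore property
        // now requires read permission on the underlying file
        Path keystorePath = ((AbstractJavaKeyStoreProvider) provider).getPath();
      }
    }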

http://git-wip-us.apache.org/repos/asf/hive/blob/dc8d8e13/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 29f6ecf..bba7d6c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -1771,6 +1771,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         alterTblDesc.setDropIfExists(true);
       }
     } else {
+      addPropertyReadEntry(mapProp, inputs);
       alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView);
     }
     alterTblDesc.setProps(mapProp);

http://git-wip-us.apache.org/repos/asf/hive/blob/dc8d8e13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff -

hive git commit: HIVE-17231: ColumnizedDeleteEventRegistry.DeleteReaderValue optimization (Eugene Koifman, reviewed by Gopal V)

2018-10-16 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/master 0f2f999bb -> d7be4b9f2


HIVE-17231: ColumnizedDeleteEventRegistry.DeleteReaderValue optimization (Eugene Koifman, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d7be4b9f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d7be4b9f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d7be4b9f

Branch: refs/heads/master
Commit: d7be4b9f26345439c472969461d3d2c81f7e5057
Parents: 0f2f999
Author: Eugene Koifman 
Authored: Tue Oct 16 11:23:11 2018 -0700
Committer: Eugene Koifman 
Committed: Tue Oct 16 11:23:11 2018 -0700

--
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  4 ++
 .../hive/ql/io/orc/OrcRawRecordMerger.java  | 55 +++-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java | 13 +
 3 files changed, 12 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d7be4b9f/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 8c7a78b..79e41d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -388,6 +388,10 @@ public class FileSinkOperator extends TerminalOperator implements
    *
    * A new FSP is created for each partition, so this only requires the bucket numbering and that
    * is mapped in directly as an index.
+   *
+   * This relies on ReduceSinkOperator to shuffle update/delete rows by
+   * UDFToInteger(RecordIdentifier), i.e. by writerId in ROW__ID.
+   * {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer#getPartitionColsFromBucketColsForUpdateDelete(Operator, boolean)}
    */
   public int createDynamicBucket(int bucketNum) {
     // this assumes all paths are bucket names (which means no lookup is needed)

http://git-wip-us.apache.org/repos/asf/hive/blob/d7be4b9f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index 6be0c74..8cabf96 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -1122,7 +1122,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader{
           }
           continue;
         }
-        for (Path deltaFile : getDeltaFiles(delta, bucket, conf, mergerOptions, isBucketed)) {
+        for (Path deltaFile : getDeltaFiles(delta, bucket, mergerOptions)) {
           FileSystem fs = deltaFile.getFileSystem(conf);
           if(!fs.exists(deltaFile)) {
             /**
@@ -1262,53 +1262,12 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader{
    * This determines the set of {@link ReaderPairAcid} to create for a given delta/.
    * For unbucketed tables {@code bucket} can be thought of as a write tranche.
    */
-  static Path[] getDeltaFiles(Path deltaDirectory, int bucket, Configuration conf,
-                              Options mergerOptions, boolean isBucketed) throws IOException {
-    if(isBucketed) {
-      /**
-       * for bucketed tables (for now) we always trust that the N in bucketN file name means that
-       * all records have {@link RecordIdentifier#getBucketProperty()} encoding bucketId = N.  This
-       * means that a delete event in bucketN can only modify an insert in another bucketN file for
-       * the same N. (Down the road we may trust it only in certain delta dirs)
-       *
-       * Compactor takes all types of deltas for a given bucket.  For regular read, any file that
-       * contains (only) insert events is treated as base and only
-       * delete_delta/ are treated as deltas.
-       */
-      assert (!mergerOptions.isCompacting &&
-          deltaDirectory.getName().startsWith(AcidUtils.DELETE_DELTA_PREFIX)
-      ) || mergerOptions.isCompacting : "Unexpected delta: " + deltaDirectory;
-      Path deltaFile = AcidUtils.createBucketFile(deltaDirectory, bucket);
-      return new Path[]{deltaFile};
-    }
-    /**
-     * For unbucketed tables insert events are also stored in bucketN files but here N is
-     * the writer ID.  We can trust that N matches info in {@link RecordIdentifier#getBucketProperty()}
-     * delta_x_y but it's not required since we can't trust N for delete_delta_x_x/bucketN.
-     * Thus we always have to take all files in a dele
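
Going by the new call site earlier in this diff, getDeltaFiles(delta, bucket, mergerOptions), a hedged sketch of the shape the method is reduced to; the body is inferred from the deleted branch above, not quoted from the patch:

    // With the bucketed/unbucketed branching removed, each delete_delta
    // directory contributes its single bucketN file directly.
    static Path[] getDeltaFiles(Path deltaDirectory, int bucket, Options mergerOptions) {
      Path deltaFile = AcidUtils.createBucketFile(deltaDirectory, bucket);
      return new Path[] { deltaFile };
    }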

hive git commit: HIVE-20509: Plan: fix wasted memory in plans with large partition counts (Barnabas Maidics reviewed by Gopal V and Peter Vary)

2018-10-16 Thread pvary
Repository: hive
Updated Branches:
  refs/heads/master 34de7ac80 -> 0f2f999bb


HIVE-20509: Plan: fix wasted memory in plans with large partition counts (Barnabas Maidics reviewed by Gopal V and Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0f2f999b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0f2f999b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0f2f999b

Branch: refs/heads/master
Commit: 0f2f999bbd2862ff61ce62a3b4047af6621030be
Parents: 34de7ac
Author: Barnabas Maidics 
Authored: Tue Oct 16 10:21:30 2018 +0200
Committer: Peter Vary 
Committed: Tue Oct 16 10:21:30 2018 +0200

--
 ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0f2f999b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index e7256cc..d5a30da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -201,7 +201,7 @@ public class MapWork extends BaseWork {
   public void addPathToAlias(Path path, String newAlias){
     ArrayList aliases = pathToAliases.get(path);
     if (aliases == null) {
-      aliases = new ArrayList<>();
+      aliases = new ArrayList<>(1);
       StringInternUtils.internUriStringsInPath(path);
       pathToAliases.put(path, aliases);
     }
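
A short illustration (not from the patch) of why the one-character change matters at scale; the capacity of 10 is the JDK default an ArrayList grows to on its first add:

    // Most paths map to exactly one alias, so a default-sized list wastes
    // nine backing-array slots per path; with plans touching hundreds of
    // thousands of partition paths, that unused capacity adds up.
    ArrayList<String> defaultSized = new ArrayList<>();  // grows to capacity 10 on first add
    ArrayList<String> presized = new ArrayList<>(1);     // backing array of one element
    defaultSized.add("t1");
    presized.add("t1");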