hive git commit: HIVE-12944: Support SUM(DISTINCT) for partitioning query. (Aihua Xu, reviewed by Szehon Ho)

2016-02-01 Thread aihuaxu
Repository: hive
Updated Branches:
  refs/heads/master 0ec6e8893 -> 11f1e47eb


HIVE-12944: Support SUM(DISTINCT) for partitioning query. (Aihua Xu, reviewed 
by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11f1e47e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11f1e47e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11f1e47e

Branch: refs/heads/master
Commit: 11f1e47ebadc7cba24e1fb9f0dfbfaf7f786d2cb
Parents: 0ec6e88
Author: Aihua Xu 
Authored: Wed Jan 27 11:25:00 2016 -0500
Committer: Aihua Xu 
Committed: Mon Feb 1 10:15:01 2016 -0500

--
 .../functions/HiveSqlSumAggFunction.java|   3 +
 .../translator/SqlFunctionConverter.java|   6 +
 .../hive/ql/udf/generic/GenericUDAFCount.java   |   3 -
 .../hive/ql/udf/generic/GenericUDAFSum.java | 110 ++-
 .../queries/clientpositive/windowing_distinct.q |   8 ++
 .../clientnegative/invalid_sum_syntax.q.out |   2 +-
 .../clientpositive/windowing_distinct.q.out |  26 +
 7 files changed, 125 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/11f1e47e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveSqlSumAggFunction.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveSqlSumAggFunction.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveSqlSumAggFunction.java
index 056eaeb..498cd0e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveSqlSumAggFunction.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveSqlSumAggFunction.java
@@ -71,6 +71,9 @@ public class HiveSqlSumAggFunction extends SqlAggFunction {
 
   //~ Methods 
 
+  public boolean isDistinct() {
+return isDistinct;
+  }
 
   @Override
   public  T unwrap(Class clazz) {

http://git-wip-us.apache.org/repos/asf/hive/blob/11f1e47e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 75c38fa..19aa414 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -229,6 +229,12 @@ public class SqlFunctionConverter {
 "TOK_FUNCTIONDI");
   }
 }
+  } else if (op instanceof HiveSqlSumAggFunction) { // case 
SUM(DISTINCT)
+HiveSqlSumAggFunction sumFunction = (HiveSqlSumAggFunction) op;
+if (sumFunction.isDistinct()) {
+  node = (ASTNode) 
ParseDriver.adaptor.create(HiveParser.TOK_FUNCTIONDI,
+  "TOK_FUNCTIONDI");
+}
   }
   node.addChild((ASTNode) 
ParseDriver.adaptor.create(HiveParser.Identifier, op.getName()));
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/11f1e47e/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
index f526c43..2825045 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
@@ -17,13 +17,11 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.hive.serde2.lazy.LazyString;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import 
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
@@ -31,7 +29,6 @@ import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspect
 import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.io.LongWr

[03/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
--
diff --git 
a/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
 
b/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
deleted file mode 100644
index 4c1cbb3..000
--- 
a/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.tez.dag.app.rm;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
-import org.apache.hadoop.hive.llap.registry.impl.LlapFixedRegistryImpl;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.tez.common.TezUtils;
-import org.apache.tez.dag.api.UserPayload;
-import org.apache.tez.dag.app.ControlledClock;
-import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
-import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestLlapTaskSchedulerService {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(TestLlapTaskSchedulerService.class);
-
-  private static final String HOST1 = "host1";
-  private static final String HOST2 = "host2";
-  private static final String HOST3 = "host3";
-
-  @Test (timeout = 5000)
-  public void testSimpleLocalAllocation() throws IOException, 
InterruptedException {
-
-TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
-
-try {
-  Priority priority1 = Priority.newInstance(1);
-  String[] hosts1 = new String[]{HOST1};
-
-  Object task1 = new Object();
-  Object clientCookie1 = new Object();
-
-  tsWrapper.controlScheduler(true);
-  tsWrapper.allocateTask(task1, hosts1, priority1, clientCookie1);
-
-  tsWrapper.signalSchedulerRun();
-  tsWrapper.awaitSchedulerRun();
-
-  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
-  // TODO Verify this is on host1.
-  assertEquals(1, tsWrapper.ts.dagStats.numLocalAllocations);
-} finally {
-  tsWrapper.shutdown();
-}
-  }
-
-  @Test (timeout = 5000)
-  public void testSimpleNoLocalityAllocation() throws IOException, 
InterruptedException {
-TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
-
-try {
-  Priority priority1 = Priority.newInstance(1);
-
-  Object task1 = new Object();
-  Object clientCookie1 = new Object();
-  tsWrapper.controlScheduler(true);
-  tsWrapper.allocateTask(task1, null, priority1, clientCookie1);
-  tsWrapper.signalSchedulerRun();
-  tsWrapper.awaitSchedulerRun();
-  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
-  assertEquals(1, tsWrapper.ts.dagStats.numAllocationsNoLocalityRequest);
-} finally {
-  tsWrapper.shutdown();
-}
-  }
-
-
-  @Test(timeout=5000)
-  public void testPreemption() throws Inte

[04/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
--
diff --git 
a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java 
b/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
deleted file mode 100644
index 6beb4f8..000
--- 
a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
+++ /dev/null
@@ -1,1512 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tez.dag.app.rm;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableMap;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.DelayQueue;
-import java.util.concurrent.Delayed;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.commons.lang.mutable.MutableInt;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.registry.ServiceInstance;
-import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
-import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Clock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.tez.common.TezUtils;
-import org.apache.tez.dag.api.TezUncheckedException;
-import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
-import org.apache.tez.serviceplugins.api.TaskScheduler;
-import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LlapTaskSchedulerService extends TaskScheduler {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(LlapTaskSchedulerService.class);
-
-  private final Configuration conf;
-
-  // interface into the registry service
-  private ServiceInstanceSet activeInstances;
-
-  // Tracks all instances, including ones which have been disabled in the past.
-  // LinkedHashMap to provide the same iteration order when selecting a random 
host.
-  @VisibleForTesting
-  final Map instanceToNodeMap = new 
LinkedHashMap<>();
-  // TODO Ideally, remove elements from this once it's known that no tasks are 
linked to the instance (all deallocated)
-
-  // Tracks tasks which could not be allocated immediately.
-  @VisibleForTesting
-  final TreeMap> pendingTasks = new TreeMap<>(new 
Comparator() {
-@Override
-public int compare(Priority o1, Priority o2) {
-  return o1.getPriority() - o2.getPriority();
-}
-  });
-
-  // Tracks running and queued tasks. Cleared after a task completes.
-  private final ConcurrentMap knownTasks = new 
ConcurrentHashMap<>();
- 

[07/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
--
diff --git 
a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
 
b/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
deleted file mode 100644
index 4ab7b32..000
--- 
a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ /dev/null
@@ -1,14553 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: LlapDaemonProtocol.proto
-
-package org.apache.hadoop.hive.llap.daemon.rpc;
-
-public final class LlapDaemonProtocolProtos {
-  private LlapDaemonProtocolProtos() {}
-  public static void registerAllExtensions(
-  com.google.protobuf.ExtensionRegistry registry) {
-  }
-  /**
-   * Protobuf enum {@code SourceStateProto}
-   */
-  public enum SourceStateProto
-  implements com.google.protobuf.ProtocolMessageEnum {
-/**
- * S_SUCCEEDED = 1;
- */
-S_SUCCEEDED(0, 1),
-/**
- * S_RUNNING = 2;
- */
-S_RUNNING(1, 2),
-;
-
-/**
- * S_SUCCEEDED = 1;
- */
-public static final int S_SUCCEEDED_VALUE = 1;
-/**
- * S_RUNNING = 2;
- */
-public static final int S_RUNNING_VALUE = 2;
-
-
-public final int getNumber() { return value; }
-
-public static SourceStateProto valueOf(int value) {
-  switch (value) {
-case 1: return S_SUCCEEDED;
-case 2: return S_RUNNING;
-default: return null;
-  }
-}
-
-public static com.google.protobuf.Internal.EnumLiteMap
-internalGetValueMap() {
-  return internalValueMap;
-}
-private static com.google.protobuf.Internal.EnumLiteMap
-internalValueMap =
-  new com.google.protobuf.Internal.EnumLiteMap() {
-public SourceStateProto findValueByNumber(int number) {
-  return SourceStateProto.valueOf(number);
-}
-  };
-
-public final com.google.protobuf.Descriptors.EnumValueDescriptor
-getValueDescriptor() {
-  return getDescriptor().getValues().get(index);
-}
-public final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptorForType() {
-  return getDescriptor();
-}
-public static final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptor() {
-  return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.getDescriptor().getEnumTypes().get(0);
-}
-
-private static final SourceStateProto[] VALUES = values();
-
-public static SourceStateProto valueOf(
-com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-  if (desc.getType() != getDescriptor()) {
-throw new java.lang.IllegalArgumentException(
-  "EnumValueDescriptor is not for this type.");
-  }
-  return VALUES[desc.getIndex()];
-}
-
-private final int index;
-private final int value;
-
-private SourceStateProto(int index, int value) {
-  this.index = index;
-  this.value = value;
-}
-
-// @@protoc_insertion_point(enum_scope:SourceStateProto)
-  }
-
-  /**
-   * Protobuf enum {@code SubmissionStateProto}
-   */
-  public enum SubmissionStateProto
-  implements com.google.protobuf.ProtocolMessageEnum {
-/**
- * ACCEPTED = 1;
- */
-ACCEPTED(0, 1),
-/**
- * REJECTED = 2;
- */
-REJECTED(1, 2),
-/**
- * EVICTED_OTHER = 3;
- */
-EVICTED_OTHER(2, 3),
-;
-
-/**
- * ACCEPTED = 1;
- */
-public static final int ACCEPTED_VALUE = 1;
-/**
- * REJECTED = 2;
- */
-public static final int REJECTED_VALUE = 2;
-/**
- * EVICTED_OTHER = 3;
- */
-public static final int EVICTED_OTHER_VALUE = 3;
-
-
-public final int getNumber() { return value; }
-
-public static SubmissionStateProto valueOf(int value) {
-  switch (value) {
-case 1: return ACCEPTED;
-case 2: return REJECTED;
-case 3: return EVICTED_OTHER;
-default: return null;
-  }
-}
-
-public static 
com.google.protobuf.Internal.EnumLiteMap
-internalGetValueMap() {
-  return internalValueMap;
-}
-private static 
com.google.protobuf.Internal.EnumLiteMap
-internalValueMap =
-  new com.google.protobuf.Internal.EnumLiteMap() 
{
-public SubmissionStateProto findValueByNumber(int number) {
-  return SubmissionStateProto.valueOf(number);
-}
-  };
-
-public final com.google.protobuf.Descriptors.EnumValueDescriptor
-getValueDescriptor() {
-  return getDescriptor().getValues().get(index);
-}
-public final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptorForType() 

[09/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
--
diff --git 
a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
 
b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
new file mode 100644
index 000..4ab7b32
--- /dev/null
+++ 
b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -0,0 +1,14553 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: LlapDaemonProtocol.proto
+
+package org.apache.hadoop.hive.llap.daemon.rpc;
+
+public final class LlapDaemonProtocolProtos {
+  private LlapDaemonProtocolProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf enum {@code SourceStateProto}
+   */
+  public enum SourceStateProto
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * S_SUCCEEDED = 1;
+ */
+S_SUCCEEDED(0, 1),
+/**
+ * S_RUNNING = 2;
+ */
+S_RUNNING(1, 2),
+;
+
+/**
+ * S_SUCCEEDED = 1;
+ */
+public static final int S_SUCCEEDED_VALUE = 1;
+/**
+ * S_RUNNING = 2;
+ */
+public static final int S_RUNNING_VALUE = 2;
+
+
+public final int getNumber() { return value; }
+
+public static SourceStateProto valueOf(int value) {
+  switch (value) {
+case 1: return S_SUCCEEDED;
+case 2: return S_RUNNING;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() {
+public SourceStateProto findValueByNumber(int number) {
+  return SourceStateProto.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final SourceStateProto[] VALUES = values();
+
+public static SourceStateProto valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private SourceStateProto(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:SourceStateProto)
+  }
+
+  /**
+   * Protobuf enum {@code SubmissionStateProto}
+   */
+  public enum SubmissionStateProto
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * ACCEPTED = 1;
+ */
+ACCEPTED(0, 1),
+/**
+ * REJECTED = 2;
+ */
+REJECTED(1, 2),
+/**
+ * EVICTED_OTHER = 3;
+ */
+EVICTED_OTHER(2, 3),
+;
+
+/**
+ * ACCEPTED = 1;
+ */
+public static final int ACCEPTED_VALUE = 1;
+/**
+ * REJECTED = 2;
+ */
+public static final int REJECTED_VALUE = 2;
+/**
+ * EVICTED_OTHER = 3;
+ */
+public static final int EVICTED_OTHER_VALUE = 3;
+
+
+public final int getNumber() { return value; }
+
+public static SubmissionStateProto valueOf(int value) {
+  switch (value) {
+case 1: return ACCEPTED;
+case 2: return REJECTED;
+case 3: return EVICTED_OTHER;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public SubmissionStateProto findValueByNumber(int number) {
+  return SubmissionStateProto.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+ 

[01/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/master 11f1e47eb -> 4185d9b8e


http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
--
diff --git 
a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
 
b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
new file mode 100644
index 000..36d8ffd
--- /dev/null
+++ 
b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
@@ -0,0 +1,684 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.registry.impl.LlapFixedRegistryImpl;
+import org.apache.hadoop.hive.llap.testhelpers.ControlledClock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.tez.common.TezUtils;
+import org.apache.tez.dag.api.UserPayload;
+import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
+import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestLlapTaskSchedulerService {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestLlapTaskSchedulerService.class);
+
+  private static final String HOST1 = "host1";
+  private static final String HOST2 = "host2";
+  private static final String HOST3 = "host3";
+
+  @Test (timeout = 5000)
+  public void testSimpleLocalAllocation() throws IOException, 
InterruptedException {
+
+TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
+
+try {
+  Priority priority1 = Priority.newInstance(1);
+  String[] hosts1 = new String[]{HOST1};
+
+  Object task1 = new Object();
+  Object clientCookie1 = new Object();
+
+  tsWrapper.controlScheduler(true);
+  tsWrapper.allocateTask(task1, hosts1, priority1, clientCookie1);
+
+  tsWrapper.signalSchedulerRun();
+  tsWrapper.awaitSchedulerRun();
+
+  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
+  // TODO Verify this is on host1.
+  assertEquals(1, tsWrapper.ts.dagStats.numLocalAllocations);
+} finally {
+  tsWrapper.shutdown();
+}
+  }
+
+  @Test (timeout = 5000)
+  public void testSimpleNoLocalityAllocation() throws IOException, 
InterruptedException {
+TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
+
+try {
+  Priority priority1 = Priority.newInstance(1);
+
+  Object task1 = new Object();
+  Object clientCookie1 = new Object();
+  tsWrapper.controlScheduler(true);
+  tsWrapper.allocateTask(task1, null, priority1, clientCookie1);
+  tsWrapper.signalSchedulerRun();
+  tsWrapper.awaitSchedulerRun();
+  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
+  assertEquals(1, tsWrapper.ts.dagStats.numAllocationsNoLocalityRequest);
+} finally {
+  tsWrapper.shutdown();
+

[02/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
--
diff --git 
a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
new file mode 100644
index 000..3bca0da
--- /dev/null
+++ 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -0,0 +1,1512 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.commons.lang.mutable.MutableInt;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.registry.ServiceInstance;
+import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Clock;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.tez.common.TezUtils;
+import org.apache.tez.dag.api.TezUncheckedException;
+import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
+import org.apache.tez.serviceplugins.api.TaskScheduler;
+import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LlapTaskSchedulerService extends TaskScheduler {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(LlapTaskSchedulerService.class);
+
+  private final Configuration conf;
+
+  // interface into the registry service
+  private ServiceInstanceSet activeInstances;
+
+  // Tracks all instances, including ones which have been disabled in the past.
+  // LinkedHashMap to provide the same iteration order when selecting a random 
host.
+  @VisibleForTesting
+  final Map instanceToNodeMap = new 
LinkedHashMap<>();
+  // TODO Ideally, remove elements from this once it's known that no tasks are 
linked to the instance (all deallocated)
+
+  // Tracks tasks which could not be allocated immediately.
+  @VisibleForTesting
+  final TreeMap> pendingTasks = new TreeMap<>(new 
Comparator() {
+@Override
+public int compare(Priority o1, Priority o2) {
+  return o1.getPriority() - o2.getPriority();
+}
+  });
+
+  // Tracks running and queued tasks. Cleared after a task completes.
+  private final Concurr

[05/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapDaemonProtocolClientProxy.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapDaemonProtocolClientProxy.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapDaemonProtocolClientProxy.java
deleted file mode 100644
index 2884e40..000
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapDaemonProtocolClientProxy.java
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.tezplugins;
-
-import javax.net.SocketFactory;
-
-import java.io.IOException;
-import java.security.PrivilegedAction;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.Message;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.LlapNodeId;
-import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
-import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemonProtocolClientImpl;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
-import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.service.AbstractService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LlapDaemonProtocolClientProxy extends AbstractService {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(LlapDaemonProtocolClientProxy.class);
-
-  private final ConcurrentMap 
hostProxies;
-
-  private final RequestManager requestManager;
-  private final RetryPolicy retryPolicy;
-  private final SocketFactory socketFactory;
-
-  private final ListeningExecutorService requestManagerExecutor;
-  private volatile ListenableFuture requestManagerFuture;
-  private final Token llapToken;
-
-  public LlapDaemonProtocolClientProxy(
-  int numThreads, Configuration conf, Token 
llapToken) {
-super(LlapDaemonProtocolClientProxy.class.getSimpleName());
-this.hostProxies = new ConcurrentHashMap<>();
-this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
-this.llapToken = llapToken;
-
-long connectionTimeout = HiveConf.getTimeVar(conf,
-ConfVars.LLAP_TASK_COMMUNI

[10/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
HIVE-12934. Refactor llap module structure to allow for a usable client. 
(Siddharth Seth, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4185d9b8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4185d9b8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4185d9b8

Branch: refs/heads/master
Commit: 4185d9b8e2eecfef3b5a38899f6928fa82c01e99
Parents: 11f1e47
Author: Siddharth Seth 
Authored: Mon Feb 1 09:45:09 2016 -0800
Committer: Siddharth Seth 
Committed: Mon Feb 1 09:45:09 2016 -0800

--
 llap-client/pom.xml |87 +-
 .../hive/llap/security/LlapTokenIdentifier.java |82 -
 .../hive/llap/security/LlapTokenProvider.java   |27 -
 .../hive/llap/tez/LlapProtocolClientProxy.java  |   509 +
 .../tez/TestLlapDaemonProtocolClientProxy.java  |   144 +
 llap-common/pom.xml |   235 +
 .../daemon/rpc/LlapDaemonProtocolProtos.java| 14553 +
 .../org/apache/hadoop/hive/llap/LlapNodeId.java |86 +
 .../impl/LlapManagementProtocolClientImpl.java  |82 +
 .../hive/llap/impl/LlapProtocolClientImpl.java  |   125 +
 .../llap/protocol/LlapManagementProtocolPB.java |26 +
 .../llap/protocol/LlapProtocolBlockingPB.java   |30 +
 .../protocol/LlapTaskUmbilicalProtocol.java |42 +
 .../hive/llap/security/LlapTokenIdentifier.java |82 +
 .../hive/llap/security/LlapTokenProvider.java   |27 +
 .../hive/llap/security/LlapTokenSelector.java   |53 +
 .../apache/hadoop/hive/llap/tez/Converters.java |   265 +
 .../src/protobuf/LlapDaemonProtocol.proto   |   148 +
 .../hive/llap/testhelpers/ControlledClock.java  |43 +
 llap-server/pom.xml |   199 +-
 .../daemon/rpc/LlapDaemonProtocolProtos.java| 14553 -
 .../org/apache/hadoop/hive/llap/LlapNodeId.java |86 -
 .../hadoop/hive/llap/cli/LlapServiceDriver.java | 8 +
 .../daemon/LlapDaemonProtocolBlockingPB.java|28 -
 .../LlapManagementProtocolBlockingPB.java   |24 -
 .../hive/llap/daemon/impl/LlapDaemon.java   | 6 +-
 .../impl/LlapDaemonProtocolClientImpl.java  |   125 -
 .../impl/LlapDaemonProtocolServerImpl.java  |   251 -
 .../impl/LlapManagementProtocolClientImpl.java  |82 -
 .../daemon/impl/LlapProtocolServerImpl.java |   251 +
 .../llap/daemon/impl/QueryFragmentInfo.java |13 +-
 .../llap/daemon/impl/TaskRunnerCallable.java| 2 +-
 .../protocol/LlapTaskUmbilicalProtocol.java |42 -
 .../llap/security/LlapDaemonPolicyProvider.java | 8 +-
 .../hive/llap/security/LlapSecurityHelper.java  | 2 +-
 .../llap/security/LlapServerSecurityInfo.java   |10 +-
 .../hive/llap/security/LlapTokenSelector.java   |53 -
 .../hadoop/hive/llap/tezplugins/Converters.java |   265 -
 .../llap/tezplugins/LlapContainerLauncher.java  |43 -
 .../LlapDaemonProtocolClientProxy.java  |   509 -
 .../llap/tezplugins/LlapTaskCommunicator.java   |   755 -
 .../tezplugins/LlapUmbilicalPolicyProvider.java |42 -
 .../tezplugins/helpers/SourceStateTracker.java  |   296 -
 .../apache/tez/dag/app/rm/ContainerFactory.java |51 -
 .../dag/app/rm/LlapTaskSchedulerService.java|  1512 --
 .../src/protobuf/LlapDaemonProtocol.proto   |   148 -
 .../impl/TestLlapDaemonProtocolServerImpl.java  |11 +-
 .../TestLlapDaemonProtocolClientProxy.java  |   143 -
 .../tezplugins/TestLlapTaskCommunicator.java|   100 -
 .../app/rm/TestLlapTaskSchedulerService.java|   685 -
 llap-tez/pom.xml|   200 +
 .../hive/llap/tezplugins/ContainerFactory.java  |51 +
 .../llap/tezplugins/LlapContainerLauncher.java  |43 +
 .../llap/tezplugins/LlapTaskCommunicator.java   |   757 +
 .../tezplugins/LlapTaskSchedulerService.java|  1512 ++
 .../hive/llap/tezplugins/LlapTezUtils.java  |29 +
 .../tezplugins/LlapUmbilicalPolicyProvider.java |42 +
 .../tezplugins/helpers/SourceStateTracker.java  |   289 +
 .../tezplugins/TestLlapTaskCommunicator.java|   100 +
 .../TestLlapTaskSchedulerService.java   |   684 +
 pom.xml | 4 +-
 ql/pom.xml  |54 +
 .../hive/ql/exec/tez/TezSessionState.java   |58 +-
 63 files changed, 20631 insertions(+), 20141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-client/pom.xml
--
diff --git a/llap-client/pom.xml b/llap-client/pom.xml
index f6a5629..50c06a4 100644
--- a/llap-client/pom.xml
+++ b/llap-client/pom.xml
@@ -32,7 +32,7 @@
   
 
   
-
+
 
 
   org.apache.hive
@@ -41,41 +41,26 @@
 
 
   org.apache.hive
-  hive-ser

[03/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f69

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
--
diff --git 
a/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
 
b/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
deleted file mode 100644
index 4c1cbb3..000
--- 
a/llap-server/src/test/org/apache/tez/dag/app/rm/TestLlapTaskSchedulerService.java
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.tez.dag.app.rm;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
-import org.apache.hadoop.hive.llap.registry.impl.LlapFixedRegistryImpl;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.tez.common.TezUtils;
-import org.apache.tez.dag.api.UserPayload;
-import org.apache.tez.dag.app.ControlledClock;
-import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
-import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestLlapTaskSchedulerService {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(TestLlapTaskSchedulerService.class);
-
-  private static final String HOST1 = "host1";
-  private static final String HOST2 = "host2";
-  private static final String HOST3 = "host3";
-
-  @Test (timeout = 5000)
-  public void testSimpleLocalAllocation() throws IOException, 
InterruptedException {
-
-TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
-
-try {
-  Priority priority1 = Priority.newInstance(1);
-  String[] hosts1 = new String[]{HOST1};
-
-  Object task1 = new Object();
-  Object clientCookie1 = new Object();
-
-  tsWrapper.controlScheduler(true);
-  tsWrapper.allocateTask(task1, hosts1, priority1, clientCookie1);
-
-  tsWrapper.signalSchedulerRun();
-  tsWrapper.awaitSchedulerRun();
-
-  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
-  // TODO Verify this is on host1.
-  assertEquals(1, tsWrapper.ts.dagStats.numLocalAllocations);
-} finally {
-  tsWrapper.shutdown();
-}
-  }
-
-  @Test (timeout = 5000)
-  public void testSimpleNoLocalityAllocation() throws IOException, 
InterruptedException {
-TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
-
-try {
-  Priority priority1 = Priority.newInstance(1);
-
-  Object task1 = new Object();
-  Object clientCookie1 = new Object();
-  tsWrapper.controlScheduler(true);
-  tsWrapper.allocateTask(task1, null, priority1, clientCookie1);
-  tsWrapper.signalSchedulerRun();
-  tsWrapper.awaitSchedulerRun();
-  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
-  assertEquals(1, tsWrapper.ts.dagStats.numAllocationsNoLocalityRequest);
-} finally {
-  tsWrapper.shutdown();
-}
-  }
-
-
-  @Test(timeout=5000)
-  public void testPreemption() throws Inte

[06/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
--
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
deleted file mode 100644
index 515bf3c..000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-public class LlapNodeId {
-
-  private static final LoadingCache CACHE =
-  CacheBuilder.newBuilder().softValues().build(
-  new CacheLoader() {
-@Override
-public LlapNodeId load(LlapNodeId key) throws Exception {
-  return key;
-}
-  });
-
-  public static LlapNodeId getInstance(String hostname, int port) {
-return CACHE.getUnchecked(new LlapNodeId(hostname, port));
-  }
-
-
-  private final String hostname;
-  private final int port;
-
-
-  private LlapNodeId(String hostname, int port) {
-this.hostname = hostname;
-this.port = port;
-  }
-
-  public String getHostname() {
-return hostname;
-  }
-
-  public int getPort() {
-return port;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-if (this == o) {
-  return true;
-}
-if (o == null || getClass() != o.getClass()) {
-  return false;
-}
-
-LlapNodeId that = (LlapNodeId) o;
-
-if (port != that.port) {
-  return false;
-}
-if (!hostname.equals(that.hostname)) {
-  return false;
-}
-
-return true;
-  }
-
-  @Override
-  public int hashCode() {
-int result = hostname.hashCode();
-result = 1009 * result + port;
-return result;
-  }
-
-  @Override
-  public String toString() {
-return hostname + ":" + port;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index 544af09..0399798 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -25,6 +25,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Collection;
 
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -217,7 +219,13 @@ public class LlapServiceDriver {
 CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), 
libDir.toString(), true);
 lfs.delete(new Path(libDir, "tez.tar.gz"), false);
 
+// llap-common
+lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapDaemonProtocolProtos.class)), libDir);
+// llap-tez
+lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapTezUtils.class)), libDir);
+// llap-server
 lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapInputFormat.class)), libDir);
+// hive-exec
 lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(HiveInputFormat.class)), libDir);
 
 // copy default aux classes (json/hbase)

http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
deleted file mode 100644
index 4c09941..000
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a c

[08/10] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
--
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java 
b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
new file mode 100644
index 000..515bf3c
--- /dev/null
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+public class LlapNodeId {
+
+  private static final LoadingCache CACHE =
+  CacheBuilder.newBuilder().softValues().build(
+  new CacheLoader() {
+@Override
+public LlapNodeId load(LlapNodeId key) throws Exception {
+  return key;
+}
+  });
+
+  public static LlapNodeId getInstance(String hostname, int port) {
+return CACHE.getUnchecked(new LlapNodeId(hostname, port));
+  }
+
+
+  private final String hostname;
+  private final int port;
+
+
+  private LlapNodeId(String hostname, int port) {
+this.hostname = hostname;
+this.port = port;
+  }
+
+  public String getHostname() {
+return hostname;
+  }
+
+  public int getPort() {
+return port;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+LlapNodeId that = (LlapNodeId) o;
+
+if (port != that.port) {
+  return false;
+}
+if (!hostname.equals(that.hostname)) {
+  return false;
+}
+
+return true;
+  }
+
+  @Override
+  public int hashCode() {
+int result = hostname.hashCode();
+result = 1009 * result + port;
+return result;
+  }
+
+  @Override
+  public String toString() {
+return hostname + ":" + port;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/4185d9b8/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
--
diff --git 
a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
new file mode 100644
index 000..cd11bdb
--- /dev/null
+++ 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.impl;
+
+import javax.annotation.Nullable;
+import javax.net.SocketFactory;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hive.llap.protocol.LlapManagementProtocolPB;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public class LlapManagementProtocolClientImpl implements 
LlapManagementProtocolPB {
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddr;
+  private final RetryPolicy retryPolicy;
+  private final SocketFactory socketFactory;
+  LlapManagementProtocolPB proxy;
+
+
+  public LlapManagementProtocolClientImpl(Configuration conf, String ho

[11/11] hive git commit: HIVE-12934 branch-2.0 addendum. Update version fro new modules to reflect branch version. (Siddharth Seth)

2016-02-01 Thread sseth
HIVE-12934 branch-2.0 addendum. Update version for new modules to
reflect branch version. (Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98d51677
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98d51677
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98d51677

Branch: refs/heads/branch-2.0
Commit: 98d516770756df44bbf13f09cfa1cf1d7e4508d2
Parents: 9886414
Author: Siddharth Seth 
Authored: Mon Feb 1 09:57:26 2016 -0800
Committer: Siddharth Seth 
Committed: Mon Feb 1 09:57:26 2016 -0800

--
 llap-common/pom.xml | 2 +-
 llap-tez/pom.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/98d51677/llap-common/pom.xml
--
diff --git a/llap-common/pom.xml b/llap-common/pom.xml
index 5343479..ac675cd 100644
--- a/llap-common/pom.xml
+++ b/llap-common/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.1.0-SNAPSHOT
+2.0.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/98d51677/llap-tez/pom.xml
--
diff --git a/llap-tez/pom.xml b/llap-tez/pom.xml
index ce020da..812d429 100644
--- a/llap-tez/pom.xml
+++ b/llap-tez/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.1.0-SNAPSHOT
+2.0.0-SNAPSHOT
 ../pom.xml
   
 



[05/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f69

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
deleted file mode 100644
index eb6384f..000
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.tezplugins;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.BiMap;
-import com.google.common.collect.HashBiMap;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.LlapNodeId;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
-import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
-import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
-import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
-import org.apache.hadoop.hive.llap.tezplugins.helpers.SourceStateTracker;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.tez.common.TezTaskUmbilicalProtocol;
-import org.apache.tez.common.security.JobTokenSecretManager;
-import org.apache.tez.dag.api.TezConfiguration;
-import org.apache.tez.dag.api.TezException;
-import org.apache.tez.dag.api.TezUncheckedException;
-import org.apache.tez.dag.api.event.VertexStateUpdate;
-import org.apache.tez.dag.app.TezTaskCommunicatorImpl;
-import org.apache.tez.dag.records.TezTaskAttemptID;
-import org.apache.tez.runtime.api.impl.TaskSpec;
-import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;
-import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
-import org.apache.tez.serviceplugins.api.ContainerEndReason;
-import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
-import org.apache.tez.serviceplugins.api.TaskCommunicatorContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(LlapTaskCommunicator.class);
-
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
-  private static final boolean is

[09/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f69

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
--
diff --git 
a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
 
b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
new file mode 100644
index 000..4ab7b32
--- /dev/null
+++ 
b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -0,0 +1,14553 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: LlapDaemonProtocol.proto
+
+package org.apache.hadoop.hive.llap.daemon.rpc;
+
+public final class LlapDaemonProtocolProtos {
+  private LlapDaemonProtocolProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf enum {@code SourceStateProto}
+   */
+  public enum SourceStateProto
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * S_SUCCEEDED = 1;
+ */
+S_SUCCEEDED(0, 1),
+/**
+ * S_RUNNING = 2;
+ */
+S_RUNNING(1, 2),
+;
+
+/**
+ * S_SUCCEEDED = 1;
+ */
+public static final int S_SUCCEEDED_VALUE = 1;
+/**
+ * S_RUNNING = 2;
+ */
+public static final int S_RUNNING_VALUE = 2;
+
+
+public final int getNumber() { return value; }
+
+public static SourceStateProto valueOf(int value) {
+  switch (value) {
+case 1: return S_SUCCEEDED;
+case 2: return S_RUNNING;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() {
+public SourceStateProto findValueByNumber(int number) {
+  return SourceStateProto.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final SourceStateProto[] VALUES = values();
+
+public static SourceStateProto valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private SourceStateProto(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:SourceStateProto)
+  }
+
+  /**
+   * Protobuf enum {@code SubmissionStateProto}
+   */
+  public enum SubmissionStateProto
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * ACCEPTED = 1;
+ */
+ACCEPTED(0, 1),
+/**
+ * REJECTED = 2;
+ */
+REJECTED(1, 2),
+/**
+ * EVICTED_OTHER = 3;
+ */
+EVICTED_OTHER(2, 3),
+;
+
+/**
+ * ACCEPTED = 1;
+ */
+public static final int ACCEPTED_VALUE = 1;
+/**
+ * REJECTED = 2;
+ */
+public static final int REJECTED_VALUE = 2;
+/**
+ * EVICTED_OTHER = 3;
+ */
+public static final int EVICTED_OTHER_VALUE = 3;
+
+
+public final int getNumber() { return value; }
+
+public static SubmissionStateProto valueOf(int value) {
+  switch (value) {
+case 1: return ACCEPTED;
+case 2: return REJECTED;
+case 3: return EVICTED_OTHER;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public SubmissionStateProto findValueByNumber(int number) {
+  return SubmissionStateProto.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+ 

[08/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
--
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java 
b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
new file mode 100644
index 000..515bf3c
--- /dev/null
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+public class LlapNodeId {
+
+  private static final LoadingCache CACHE =
+  CacheBuilder.newBuilder().softValues().build(
+  new CacheLoader() {
+@Override
+public LlapNodeId load(LlapNodeId key) throws Exception {
+  return key;
+}
+  });
+
+  public static LlapNodeId getInstance(String hostname, int port) {
+return CACHE.getUnchecked(new LlapNodeId(hostname, port));
+  }
+
+
+  private final String hostname;
+  private final int port;
+
+
+  private LlapNodeId(String hostname, int port) {
+this.hostname = hostname;
+this.port = port;
+  }
+
+  public String getHostname() {
+return hostname;
+  }
+
+  public int getPort() {
+return port;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+LlapNodeId that = (LlapNodeId) o;
+
+if (port != that.port) {
+  return false;
+}
+if (!hostname.equals(that.hostname)) {
+  return false;
+}
+
+return true;
+  }
+
+  @Override
+  public int hashCode() {
+int result = hostname.hashCode();
+result = 1009 * result + port;
+return result;
+  }
+
+  @Override
+  public String toString() {
+return hostname + ":" + port;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
--
diff --git 
a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
new file mode 100644
index 000..cd11bdb
--- /dev/null
+++ 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.impl;
+
+import javax.annotation.Nullable;
+import javax.net.SocketFactory;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hive.llap.protocol.LlapManagementProtocolPB;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public class LlapManagementProtocolClientImpl implements 
LlapManagementProtocolPB {
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddr;
+  private final RetryPolicy retryPolicy;
+  private final SocketFactory socketFactory;
+  LlapManagementProtocolPB proxy;
+
+
+  public LlapManagementProtocolClientImpl(Configuration conf, String ho

[07/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
--
diff --git 
a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
 
b/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
deleted file mode 100644
index 4ab7b32..000
--- 
a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ /dev/null
@@ -1,14553 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: LlapDaemonProtocol.proto
-
-package org.apache.hadoop.hive.llap.daemon.rpc;
-
-public final class LlapDaemonProtocolProtos {
-  private LlapDaemonProtocolProtos() {}
-  public static void registerAllExtensions(
-  com.google.protobuf.ExtensionRegistry registry) {
-  }
-  /**
-   * Protobuf enum {@code SourceStateProto}
-   */
-  public enum SourceStateProto
-  implements com.google.protobuf.ProtocolMessageEnum {
-/**
- * S_SUCCEEDED = 1;
- */
-S_SUCCEEDED(0, 1),
-/**
- * S_RUNNING = 2;
- */
-S_RUNNING(1, 2),
-;
-
-/**
- * S_SUCCEEDED = 1;
- */
-public static final int S_SUCCEEDED_VALUE = 1;
-/**
- * S_RUNNING = 2;
- */
-public static final int S_RUNNING_VALUE = 2;
-
-
-public final int getNumber() { return value; }
-
-public static SourceStateProto valueOf(int value) {
-  switch (value) {
-case 1: return S_SUCCEEDED;
-case 2: return S_RUNNING;
-default: return null;
-  }
-}
-
-public static com.google.protobuf.Internal.EnumLiteMap
-internalGetValueMap() {
-  return internalValueMap;
-}
-private static com.google.protobuf.Internal.EnumLiteMap
-internalValueMap =
-  new com.google.protobuf.Internal.EnumLiteMap() {
-public SourceStateProto findValueByNumber(int number) {
-  return SourceStateProto.valueOf(number);
-}
-  };
-
-public final com.google.protobuf.Descriptors.EnumValueDescriptor
-getValueDescriptor() {
-  return getDescriptor().getValues().get(index);
-}
-public final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptorForType() {
-  return getDescriptor();
-}
-public static final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptor() {
-  return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.getDescriptor().getEnumTypes().get(0);
-}
-
-private static final SourceStateProto[] VALUES = values();
-
-public static SourceStateProto valueOf(
-com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-  if (desc.getType() != getDescriptor()) {
-throw new java.lang.IllegalArgumentException(
-  "EnumValueDescriptor is not for this type.");
-  }
-  return VALUES[desc.getIndex()];
-}
-
-private final int index;
-private final int value;
-
-private SourceStateProto(int index, int value) {
-  this.index = index;
-  this.value = value;
-}
-
-// @@protoc_insertion_point(enum_scope:SourceStateProto)
-  }
-
-  /**
-   * Protobuf enum {@code SubmissionStateProto}
-   */
-  public enum SubmissionStateProto
-  implements com.google.protobuf.ProtocolMessageEnum {
-/**
- * ACCEPTED = 1;
- */
-ACCEPTED(0, 1),
-/**
- * REJECTED = 2;
- */
-REJECTED(1, 2),
-/**
- * EVICTED_OTHER = 3;
- */
-EVICTED_OTHER(2, 3),
-;
-
-/**
- * ACCEPTED = 1;
- */
-public static final int ACCEPTED_VALUE = 1;
-/**
- * REJECTED = 2;
- */
-public static final int REJECTED_VALUE = 2;
-/**
- * EVICTED_OTHER = 3;
- */
-public static final int EVICTED_OTHER_VALUE = 3;
-
-
-public final int getNumber() { return value; }
-
-public static SubmissionStateProto valueOf(int value) {
-  switch (value) {
-case 1: return ACCEPTED;
-case 2: return REJECTED;
-case 3: return EVICTED_OTHER;
-default: return null;
-  }
-}
-
-public static 
com.google.protobuf.Internal.EnumLiteMap
-internalGetValueMap() {
-  return internalValueMap;
-}
-private static 
com.google.protobuf.Internal.EnumLiteMap
-internalValueMap =
-  new com.google.protobuf.Internal.EnumLiteMap() 
{
-public SubmissionStateProto findValueByNumber(int number) {
-  return SubmissionStateProto.valueOf(number);
-}
-  };
-
-public final com.google.protobuf.Descriptors.EnumValueDescriptor
-getValueDescriptor() {
-  return getDescriptor().getValues().get(index);
-}
-public final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptorForType() 

[01/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 29a988fd9 -> 98d516770


http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
--
diff --git 
a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
 
b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
new file mode 100644
index 000..36d8ffd
--- /dev/null
+++ 
b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java
@@ -0,0 +1,684 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.registry.impl.LlapFixedRegistryImpl;
+import org.apache.hadoop.hive.llap.testhelpers.ControlledClock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.tez.common.TezUtils;
+import org.apache.tez.dag.api.UserPayload;
+import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
+import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestLlapTaskSchedulerService {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestLlapTaskSchedulerService.class);
+
+  private static final String HOST1 = "host1";
+  private static final String HOST2 = "host2";
+  private static final String HOST3 = "host3";
+
+  @Test (timeout = 5000)
+  public void testSimpleLocalAllocation() throws IOException, 
InterruptedException {
+
+TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
+
+try {
+  Priority priority1 = Priority.newInstance(1);
+  String[] hosts1 = new String[]{HOST1};
+
+  Object task1 = new Object();
+  Object clientCookie1 = new Object();
+
+  tsWrapper.controlScheduler(true);
+  tsWrapper.allocateTask(task1, hosts1, priority1, clientCookie1);
+
+  tsWrapper.signalSchedulerRun();
+  tsWrapper.awaitSchedulerRun();
+
+  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
+  // TODO Verify this is on host1.
+  assertEquals(1, tsWrapper.ts.dagStats.numLocalAllocations);
+} finally {
+  tsWrapper.shutdown();
+}
+  }
+
+  @Test (timeout = 5000)
+  public void testSimpleNoLocalityAllocation() throws IOException, 
InterruptedException {
+TestTaskSchedulerServiceWrapper tsWrapper = new 
TestTaskSchedulerServiceWrapper();
+
+try {
+  Priority priority1 = Priority.newInstance(1);
+
+  Object task1 = new Object();
+  Object clientCookie1 = new Object();
+  tsWrapper.controlScheduler(true);
+  tsWrapper.allocateTask(task1, null, priority1, clientCookie1);
+  tsWrapper.signalSchedulerRun();
+  tsWrapper.awaitSchedulerRun();
+  verify(tsWrapper.mockAppCallback).taskAllocated(eq(task1), 
eq(clientCookie1), any(Container.class));
+  assertEquals(1, tsWrapper.ts.dagStats.numAllocationsNoLocalityRequest);
+} finally {
+  tsWrapper.shutdown();
+

[02/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
--
diff --git 
a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
new file mode 100644
index 000..3bca0da
--- /dev/null
+++ 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -0,0 +1,1512 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.commons.lang.mutable.MutableInt;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.registry.ServiceInstance;
+import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Clock;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.tez.common.TezUtils;
+import org.apache.tez.dag.api.TezUncheckedException;
+import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
+import org.apache.tez.serviceplugins.api.TaskScheduler;
+import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LlapTaskSchedulerService extends TaskScheduler {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(LlapTaskSchedulerService.class);
+
+  private final Configuration conf;
+
+  // interface into the registry service
+  private ServiceInstanceSet activeInstances;
+
+  // Tracks all instances, including ones which have been disabled in the past.
+  // LinkedHashMap to provide the same iteration order when selecting a random 
host.
+  @VisibleForTesting
+  final Map instanceToNodeMap = new 
LinkedHashMap<>();
+  // TODO Ideally, remove elements from this once it's known that no tasks are 
linked to the instance (all deallocated)
+
+  // Tracks tasks which could not be allocated immediately.
+  @VisibleForTesting
+  final TreeMap> pendingTasks = new TreeMap<>(new 
Comparator() {
+@Override
+public int compare(Priority o1, Priority o2) {
+  return o1.getPriority() - o2.getPriority();
+}
+  });
+
+  // Tracks running and queued tasks. Cleared after a task completes.
+  private final Concurr

[10/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
HIVE-12934. Refactor llap module structure to allow for a usable client. 
(Siddharth Seth, reviewed by Sergey Shelukhin)
(cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9886414b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9886414b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9886414b

Branch: refs/heads/branch-2.0
Commit: 9886414b3dd973c1630b84634f0296adac7a193b
Parents: 29a988f
Author: Siddharth Seth 
Authored: Mon Feb 1 09:45:09 2016 -0800
Committer: Siddharth Seth 
Committed: Mon Feb 1 09:46:18 2016 -0800

--
 llap-client/pom.xml |87 +-
 .../hive/llap/security/LlapTokenIdentifier.java |82 -
 .../hive/llap/security/LlapTokenProvider.java   |27 -
 .../hive/llap/tez/LlapProtocolClientProxy.java  |   509 +
 .../tez/TestLlapDaemonProtocolClientProxy.java  |   144 +
 llap-common/pom.xml |   235 +
 .../daemon/rpc/LlapDaemonProtocolProtos.java| 14553 +
 .../org/apache/hadoop/hive/llap/LlapNodeId.java |86 +
 .../impl/LlapManagementProtocolClientImpl.java  |82 +
 .../hive/llap/impl/LlapProtocolClientImpl.java  |   125 +
 .../llap/protocol/LlapManagementProtocolPB.java |26 +
 .../llap/protocol/LlapProtocolBlockingPB.java   |30 +
 .../protocol/LlapTaskUmbilicalProtocol.java |42 +
 .../hive/llap/security/LlapTokenIdentifier.java |82 +
 .../hive/llap/security/LlapTokenProvider.java   |27 +
 .../hive/llap/security/LlapTokenSelector.java   |53 +
 .../apache/hadoop/hive/llap/tez/Converters.java |   265 +
 .../src/protobuf/LlapDaemonProtocol.proto   |   148 +
 .../hive/llap/testhelpers/ControlledClock.java  |43 +
 llap-server/pom.xml |   199 +-
 .../daemon/rpc/LlapDaemonProtocolProtos.java| 14553 -
 .../org/apache/hadoop/hive/llap/LlapNodeId.java |86 -
 .../hadoop/hive/llap/cli/LlapServiceDriver.java | 8 +
 .../daemon/LlapDaemonProtocolBlockingPB.java|28 -
 .../LlapManagementProtocolBlockingPB.java   |24 -
 .../hive/llap/daemon/impl/LlapDaemon.java   | 4 +-
 .../impl/LlapDaemonProtocolClientImpl.java  |   125 -
 .../impl/LlapDaemonProtocolServerImpl.java  |   251 -
 .../impl/LlapManagementProtocolClientImpl.java  |82 -
 .../daemon/impl/LlapProtocolServerImpl.java |   251 +
 .../llap/daemon/impl/QueryFragmentInfo.java |13 +-
 .../llap/daemon/impl/TaskRunnerCallable.java| 2 +-
 .../protocol/LlapTaskUmbilicalProtocol.java |42 -
 .../llap/security/LlapDaemonPolicyProvider.java | 8 +-
 .../hive/llap/security/LlapSecurityHelper.java  | 2 +-
 .../llap/security/LlapServerSecurityInfo.java   |10 +-
 .../hive/llap/security/LlapTokenSelector.java   |53 -
 .../hadoop/hive/llap/tezplugins/Converters.java |   265 -
 .../llap/tezplugins/LlapContainerLauncher.java  |43 -
 .../LlapDaemonProtocolClientProxy.java  |   509 -
 .../llap/tezplugins/LlapTaskCommunicator.java   |   755 -
 .../tezplugins/LlapUmbilicalPolicyProvider.java |42 -
 .../tezplugins/helpers/SourceStateTracker.java  |   296 -
 .../apache/tez/dag/app/rm/ContainerFactory.java |51 -
 .../dag/app/rm/LlapTaskSchedulerService.java|  1512 --
 .../src/protobuf/LlapDaemonProtocol.proto   |   148 -
 .../impl/TestLlapDaemonProtocolServerImpl.java  |11 +-
 .../TestLlapDaemonProtocolClientProxy.java  |   143 -
 .../tezplugins/TestLlapTaskCommunicator.java|   100 -
 .../app/rm/TestLlapTaskSchedulerService.java|   685 -
 llap-tez/pom.xml|   200 +
 .../hive/llap/tezplugins/ContainerFactory.java  |51 +
 .../llap/tezplugins/LlapContainerLauncher.java  |43 +
 .../llap/tezplugins/LlapTaskCommunicator.java   |   757 +
 .../tezplugins/LlapTaskSchedulerService.java|  1512 ++
 .../hive/llap/tezplugins/LlapTezUtils.java  |29 +
 .../tezplugins/LlapUmbilicalPolicyProvider.java |42 +
 .../tezplugins/helpers/SourceStateTracker.java  |   289 +
 .../tezplugins/TestLlapTaskCommunicator.java|   100 +
 .../TestLlapTaskSchedulerService.java   |   684 +
 pom.xml | 4 +-
 ql/pom.xml  |54 +
 .../hive/ql/exec/tez/TezSessionState.java   |58 +-
 63 files changed, 20631 insertions(+), 20139 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-client/pom.xml
--
diff --git a/llap-client/pom.xml b/llap-client/pom.xml
index 3c85180..a0eeb44 100644
--- a/llap-client/pom.xml
+++ b/llap-client/pom.xml
@@ -32,7 +32,7 @@
   
 
   
-
+
 
 
   org.apache.hi

[06/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f6928fa82c01e99)

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
--
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
deleted file mode 100644
index 515bf3c..000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-public class LlapNodeId {
-
-  private static final LoadingCache CACHE =
-  CacheBuilder.newBuilder().softValues().build(
-  new CacheLoader() {
-@Override
-public LlapNodeId load(LlapNodeId key) throws Exception {
-  return key;
-}
-  });
-
-  public static LlapNodeId getInstance(String hostname, int port) {
-return CACHE.getUnchecked(new LlapNodeId(hostname, port));
-  }
-
-
-  private final String hostname;
-  private final int port;
-
-
-  private LlapNodeId(String hostname, int port) {
-this.hostname = hostname;
-this.port = port;
-  }
-
-  public String getHostname() {
-return hostname;
-  }
-
-  public int getPort() {
-return port;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-if (this == o) {
-  return true;
-}
-if (o == null || getClass() != o.getClass()) {
-  return false;
-}
-
-LlapNodeId that = (LlapNodeId) o;
-
-if (port != that.port) {
-  return false;
-}
-if (!hostname.equals(that.hostname)) {
-  return false;
-}
-
-return true;
-  }
-
-  @Override
-  public int hashCode() {
-int result = hostname.hashCode();
-result = 1009 * result + port;
-return result;
-  }
-
-  @Override
-  public String toString() {
-return hostname + ":" + port;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index 544af09..0399798 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -25,6 +25,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Collection;
 
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -217,7 +219,13 @@ public class LlapServiceDriver {
 CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), 
libDir.toString(), true);
 lfs.delete(new Path(libDir, "tez.tar.gz"), false);
 
+// llap-common
+lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapDaemonProtocolProtos.class)), libDir);
+// llap-tez
+lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapTezUtils.class)), libDir);
+// llap-server
 lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(LlapInputFormat.class)), libDir);
+// hive-exec
 lfs.copyFromLocalFile(new 
Path(Utilities.jarFinderGetJar(HiveInputFormat.class)), libDir);
 
 // copy default aux classes (json/hbase)

http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
deleted file mode 100644
index 4c09941..000
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at

[04/11] hive git commit: HIVE-12934. Refactor llap module structure to allow for a usable client. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 4185d9b8e2eecfef3b5a38899f69

2016-02-01 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/9886414b/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
--
diff --git 
a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java 
b/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
deleted file mode 100644
index 6beb4f8..000
--- 
a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
+++ /dev/null
@@ -1,1512 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tez.dag.app.rm;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableMap;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.DelayQueue;
-import java.util.concurrent.Delayed;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.commons.lang.mutable.MutableInt;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.registry.ServiceInstance;
-import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
-import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Clock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.tez.common.TezUtils;
-import org.apache.tez.dag.api.TezUncheckedException;
-import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
-import org.apache.tez.serviceplugins.api.TaskScheduler;
-import org.apache.tez.serviceplugins.api.TaskSchedulerContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LlapTaskSchedulerService extends TaskScheduler {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(LlapTaskSchedulerService.class);
-
-  private final Configuration conf;
-
-  // interface into the registry service
-  private ServiceInstanceSet activeInstances;
-
-  // Tracks all instances, including ones which have been disabled in the past.
-  // LinkedHashMap to provide the same iteration order when selecting a random 
host.
-  @VisibleForTesting
-  final Map instanceToNodeMap = new 
LinkedHashMap<>();
-  // TODO Ideally, remove elements from this once it's known that no tasks are 
linked to the instance (all deallocated)
-
-  // Tracks tasks which could not be allocated immediately.
-  @VisibleForTesting
-  final TreeMap> pendingTasks = new TreeMap<>(new 
Comparator() {
-@Override
-public int compare(Priority o1, Priority o2) {
-  return o1.getPriority() - o2.getPriority();
-}
-  });
-
-  // Tracks running and queued tasks. Cleared after a task completes.
-  private final ConcurrentMap knownTasks = new 
ConcurrentHashMap<>();
- 

[2/2] hive git commit: HIVE-12945 : Bucket pruning: bucketing for -ve hashcodes have historical issues (Gopal V, reviewed by Sergey Shelukhin, Prasanth Jayachandran)

2016-02-01 Thread sershe
HIVE-12945 : Bucket pruning: bucketing for -ve hashcodes have historical issues 
(Gopal V, reviewed by Sergey Shelukhin, Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5b7230d8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5b7230d8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5b7230d8

Branch: refs/heads/branch-2.0
Commit: 5b7230d8ea304baee3020ebcc230cb8289856598
Parents: 98d5167
Author: Sergey Shelukhin 
Authored: Mon Feb 1 10:33:15 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 10:35:54 2016 -0800

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 +
 .../test/resources/testconfiguration.properties |  1 +
 .../optimizer/FixedBucketPruningOptimizer.java  | 25 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java |  4 +-
 .../queries/clientpositive/bucketpruning1.q |  6 +-
 .../clientpositive/tez/bucketpruning1.q.out | 86 
 6 files changed, 122 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5b7230d8/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 2fb283d..6b38aa5 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2336,6 +2336,10 @@ public class HiveConf extends Configuration {
  "When pruning is enabled, filters on bucket columns will be processed 
by \n" +
  "filtering the splits against a bitset of included buckets. This 
needs predicates \n"+
  "produced by hive.optimize.ppd and hive.optimize.index.filters."),
+TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
+"hive.tez.bucket.pruning.compat", true,
+"When pruning is enabled, handle possibly broken inserts due to 
negative hashcodes.\n" +
+"This occasionally doubles the data scan cost, but is default enabled 
for safety"),
 TEZ_DYNAMIC_PARTITION_PRUNING(
 "hive.tez.dynamic.partition.pruning", true,
 "When dynamic pruning is enabled, joins on partition keys will be 
processed by sending\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/5b7230d8/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index c7535b7..e575eef 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -380,6 +380,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   constprog_dpp.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
+  bucketpruning1.q,\
   explainuser_1.q,\
   explainuser_2.q,\
   explainuser_3.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/5b7230d8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
index c63318e..9e9beb0 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
@@ -65,6 +65,12 @@ public class FixedBucketPruningOptimizer extends Transform {
   private static final Log LOG = LogFactory
   .getLog(FixedBucketPruningOptimizer.class.getName());
 
+  private final boolean compat;
+
+  public FixedBucketPruningOptimizer(boolean compat) {
+this.compat = compat;
+  }
+
   public class NoopWalker implements NodeProcessor {
 @Override
 public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
@@ -229,6 +235,14 @@ public class FixedBucketPruningOptimizer extends Transform 
{
 Object convCols[] = new Object[] {conv.convert(literal)};
 int n = ObjectInspectorUtils.getBucketNumber(convCols, new 
ObjectInspector[]{constOI}, ctxt.getNumBuckets());
 bs.set(n);
+if (ctxt.isCompat()) {
+  int h = ObjectInspectorUtils.getBucketHashCode(convCols, new 
ObjectInspector[]{constOI});
+  // -ve hashcodes had conversion to positive done in different ways 
in the past
+  // abs() is now obsolete and all inserts now use & Integer.MAX_VALUE 
+  // the compat mode assumes that old data could've been loaded using 
the other conversion
+  n = ObjectInspectorUtils.getBucketNumber(Math.abs(h), 
ctx

[1/2] hive git commit: HIVE-12945 : Bucket pruning: bucketing for -ve hashcodes have historical issues (Gopal V, reviewed by Sergey Shelukhin, Prasanth Jayachandran)

2016-02-01 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 98d516770 -> 5b7230d8e
  refs/heads/master 4185d9b8e -> 43837e8ef


HIVE-12945 : Bucket pruning: bucketing for -ve hashcodes have historical issues 
(Gopal V, reviewed by Sergey Shelukhin, Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/43837e8e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/43837e8e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/43837e8e

Branch: refs/heads/master
Commit: 43837e8ef0a74b8ac777f0de9227c9bf233a27a6
Parents: 4185d9b
Author: Sergey Shelukhin 
Authored: Mon Feb 1 10:33:15 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 10:33:15 2016 -0800

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 +
 .../test/resources/testconfiguration.properties |  1 +
 .../optimizer/FixedBucketPruningOptimizer.java  | 25 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java |  4 +-
 .../queries/clientpositive/bucketpruning1.q |  6 +-
 .../clientpositive/tez/bucketpruning1.q.out | 86 
 6 files changed, 122 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/43837e8e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ff376a8..6678de6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2366,6 +2366,10 @@ public class HiveConf extends Configuration {
  "When pruning is enabled, filters on bucket columns will be processed 
by \n" +
  "filtering the splits against a bitset of included buckets. This 
needs predicates \n"+
  "produced by hive.optimize.ppd and hive.optimize.index.filters."),
+TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
+"hive.tez.bucket.pruning.compat", true,
+"When pruning is enabled, handle possibly broken inserts due to 
negative hashcodes.\n" +
+"This occasionally doubles the data scan cost, but is default enabled 
for safety"),
 TEZ_DYNAMIC_PARTITION_PRUNING(
 "hive.tez.dynamic.partition.pruning", true,
 "When dynamic pruning is enabled, joins on partition keys will be 
processed by sending\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/43837e8e/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index a6e599c..dfd221e 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -380,6 +380,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   constprog_dpp.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
+  bucketpruning1.q,\
   explainuser_1.q,\
   explainuser_2.q,\
   explainuser_3.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/43837e8e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
index c63318e..9e9beb0 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
@@ -65,6 +65,12 @@ public class FixedBucketPruningOptimizer extends Transform {
   private static final Log LOG = LogFactory
   .getLog(FixedBucketPruningOptimizer.class.getName());
 
+  private final boolean compat;
+
+  public FixedBucketPruningOptimizer(boolean compat) {
+this.compat = compat;
+  }
+
   public class NoopWalker implements NodeProcessor {
 @Override
 public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
@@ -229,6 +235,14 @@ public class FixedBucketPruningOptimizer extends Transform 
{
 Object convCols[] = new Object[] {conv.convert(literal)};
 int n = ObjectInspectorUtils.getBucketNumber(convCols, new 
ObjectInspector[]{constOI}, ctxt.getNumBuckets());
 bs.set(n);
+if (ctxt.isCompat()) {
+  int h = ObjectInspectorUtils.getBucketHashCode(convCols, new 
ObjectInspector[]{constOI});
+  // -ve hashcodes had conversion to positive done in different ways 
in the past
+  // abs() is now obsolete and all inserts now use & Integer.MAX_VALUE 
+  // the compat mode assumes that old 

hive git commit: HIVE-12931. Shuffle tokens stay around forever in LLAP. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-02-01 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/master 43837e8ef -> 1e6fa1eb3


HIVE-12931. Shuffle tokens stay around forever in LLAP. (Siddharth Seth, 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1e6fa1eb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1e6fa1eb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1e6fa1eb

Branch: refs/heads/master
Commit: 1e6fa1eb3f8b7eeb551355cb6ebee483ef4e4d71
Parents: 43837e8
Author: Siddharth Seth 
Authored: Mon Feb 1 11:14:37 2016 -0800
Committer: Siddharth Seth 
Committed: Mon Feb 1 11:14:37 2016 -0800

--
 .../llap/daemon/impl/ContainerRunnerImpl.java   | 31 +
 .../hive/llap/daemon/impl/QueryTracker.java | 14 +-
 .../llap/shufflehandler/ShuffleHandler.java | 49 +++-
 3 files changed, 62 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1e6fa1eb/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 535fe76..e80fb15 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -46,7 +46,6 @@ import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor
 import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
 import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
-import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
 import org.apache.hadoop.hive.ql.exec.tez.TezProcessor;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.security.Credentials;
@@ -178,36 +177,30 @@ public class ContainerRunnerImpl extends CompositeService 
implements ContainerRu
 
   QueryIdentifier queryIdentifier = new 
QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
 
+  Credentials credentials = new Credentials();
+  DataInputBuffer dib = new DataInputBuffer();
+  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
+  dib.reset(tokenBytes, tokenBytes.length);
+  credentials.readTokenStorageStream(dib);
+
+  Token jobToken = 
TokenCache.getSessionToken(credentials);
+
   QueryFragmentInfo fragmentInfo = queryTracker
-  .registerFragment(queryIdentifier, request.getApplicationIdString(), 
fragmentSpec.getDagName(),
+  .registerFragment(queryIdentifier, request.getApplicationIdString(),
+  fragmentSpec.getDagName(),
   dagIdentifier,
   fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
-  fragmentSpec.getAttemptNumber(), request.getUser(), 
request.getFragmentSpec());
+  fragmentSpec.getAttemptNumber(), request.getUser(), 
request.getFragmentSpec(),
+  jobToken);
 
   String[] localDirs = fragmentInfo.getLocalDirs();
   Preconditions.checkNotNull(localDirs);
-
   if (LOG.isDebugEnabled()) {
 LOG.debug("Dirs are: " + Arrays.toString(localDirs));
   }
   // May need to setup localDir for re-localization, which is usually 
setup as Environment.PWD.
   // Used for re-localization, to add the user specified configuration 
(conf_pb_binary_stream)
 
-  Credentials credentials = new Credentials();
-  DataInputBuffer dib = new DataInputBuffer();
-  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
-  dib.reset(tokenBytes, tokenBytes.length);
-  credentials.readTokenStorageStream(dib);
-
-  Token jobToken = 
TokenCache.getSessionToken(credentials);
-
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Registering request with the ShuffleHandler");
-  }
-  ShuffleHandler.get()
-  .registerDag(request.getApplicationIdString(), dagIdentifier, 
jobToken,
-  request.getUser(), localDirs);
-
   TaskRunnerCallable callable = new TaskRunnerCallable(request, 
fragmentInfo, new Configuration(getConfig()),
   new LlapExecutionContext(localAddress.get().getHostName(), 
queryTracker), env,
   credentials, memoryPerExecutor, amReporter, confParams, metrics, 
killedTaskHandler,

http://git-wip-us.apache.org/repos/asf/hive/blob/1e6fa1eb/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
--

hive git commit: HIVE-12931. Shuffle tokens stay around forever in LLAP. (Siddharth Seth, reviewed by Sergey Shelukhin) (cherry picked from commit 1e6fa1eb3f8b7eeb551355cb6ebee483ef4e4d71)

2016-02-01 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 5b7230d8e -> 91adc5287


HIVE-12931. Shuffle tokens stay around forever in LLAP. (Siddharth Seth, 
reviewed by Sergey Shelukhin)
(cherry picked from commit 1e6fa1eb3f8b7eeb551355cb6ebee483ef4e4d71)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/91adc528
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/91adc528
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/91adc528

Branch: refs/heads/branch-2.0
Commit: 91adc5287b6677ea1ea23b97524b007ff01568fa
Parents: 5b7230d
Author: Siddharth Seth 
Authored: Mon Feb 1 11:14:37 2016 -0800
Committer: Siddharth Seth 
Committed: Mon Feb 1 11:15:23 2016 -0800

--
 .../llap/daemon/impl/ContainerRunnerImpl.java   | 31 +
 .../hive/llap/daemon/impl/QueryTracker.java | 14 +-
 .../llap/shufflehandler/ShuffleHandler.java | 49 +++-
 3 files changed, 62 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/91adc528/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index a2a55cc..e7c7f44 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -46,7 +46,6 @@ import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor
 import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
 import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
-import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
 import org.apache.hadoop.hive.ql.exec.tez.TezProcessor;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.security.Credentials;
@@ -178,36 +177,30 @@ public class ContainerRunnerImpl extends CompositeService 
implements ContainerRu
 
   QueryIdentifier queryIdentifier = new 
QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
 
+  Credentials credentials = new Credentials();
+  DataInputBuffer dib = new DataInputBuffer();
+  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
+  dib.reset(tokenBytes, tokenBytes.length);
+  credentials.readTokenStorageStream(dib);
+
+  Token jobToken = 
TokenCache.getSessionToken(credentials);
+
   QueryFragmentInfo fragmentInfo = queryTracker
-  .registerFragment(queryIdentifier, request.getApplicationIdString(), 
fragmentSpec.getDagName(),
+  .registerFragment(queryIdentifier, request.getApplicationIdString(),
+  fragmentSpec.getDagName(),
   dagIdentifier,
   fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
-  fragmentSpec.getAttemptNumber(), request.getUser(), 
request.getFragmentSpec());
+  fragmentSpec.getAttemptNumber(), request.getUser(), 
request.getFragmentSpec(),
+  jobToken);
 
   String[] localDirs = fragmentInfo.getLocalDirs();
   Preconditions.checkNotNull(localDirs);
-
   if (LOG.isDebugEnabled()) {
 LOG.debug("Dirs are: " + Arrays.toString(localDirs));
   }
   // May need to setup localDir for re-localization, which is usually 
setup as Environment.PWD.
   // Used for re-localization, to add the user specified configuration 
(conf_pb_binary_stream)
 
-  Credentials credentials = new Credentials();
-  DataInputBuffer dib = new DataInputBuffer();
-  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
-  dib.reset(tokenBytes, tokenBytes.length);
-  credentials.readTokenStorageStream(dib);
-
-  Token jobToken = 
TokenCache.getSessionToken(credentials);
-
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Registering request with the ShuffleHandler");
-  }
-  ShuffleHandler.get()
-  .registerDag(request.getApplicationIdString(), dagIdentifier, 
jobToken,
-  request.getUser(), localDirs);
-
   TaskRunnerCallable callable = new TaskRunnerCallable(request, 
fragmentInfo, new Configuration(getConfig()),
   new LlapExecutionContext(localAddress.get().getHostName(), 
queryTracker), env,
   credentials, memoryPerExecutor, amReporter, confParams, metrics, 
killedTaskHandler,

http://git-wip-us.apache.org/repos/asf/hive/blob/91adc528/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTr

[2/2] hive git commit: HIVE-12964 : TestOperationLoggingAPIWithMr, TestOperationLoggingAPIWithTez fail on branch-2.0 (with Java 7, at least) (Prasanth Jayachandran, reviewed by Sergey Shelukhin)

2016-02-01 Thread sershe
HIVE-12964 : TestOperationLoggingAPIWithMr, TestOperationLoggingAPIWithTez fail 
on branch-2.0 (with Java 7, at least) (Prasanth Jayachandran, reviewed by 
Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/29347907
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/29347907
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/29347907

Branch: refs/heads/branch-2.0
Commit: 29347907af59316517cc59eed56d6b3ee9b60ce4
Parents: 91adc52
Author: Sergey Shelukhin 
Authored: Mon Feb 1 11:14:57 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 11:26:47 2016 -0800

--
 .../org/apache/hive/service/cli/operation/LogDivertAppender.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/29347907/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
--
diff --git 
a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java 
b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
index 59dc433..eaf1acb 100644
--- 
a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
+++ 
b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
@@ -164,6 +164,7 @@ public class LogDivertAppender
   // and set the pattern and excludeMatches accordingly.
   if (currentLoggingMode != loggingMode) {
 loggingMode = currentLoggingMode;
+excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
 setCurrentNamePattern(loggingMode);
   }
 



[1/2] hive git commit: HIVE-12964 : TestOperationLoggingAPIWithMr, TestOperationLoggingAPIWithTez fail on branch-2.0 (with Java 7, at least) (Prasanth Jayachandran, reviewed by Sergey Shelukhin)

2016-02-01 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 91adc5287 -> 29347907a
  refs/heads/master 1e6fa1eb3 -> 5140e73ca


HIVE-12964 : TestOperationLoggingAPIWithMr, TestOperationLoggingAPIWithTez fail 
on branch-2.0 (with Java 7, at least) (Prasanth Jayachandran, reviewed by 
Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5140e73c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5140e73c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5140e73c

Branch: refs/heads/master
Commit: 5140e73ca8f7258b8cd3cdc03a3691e09e4df8d9
Parents: 1e6fa1e
Author: Sergey Shelukhin 
Authored: Mon Feb 1 11:14:57 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 11:26:26 2016 -0800

--
 .../org/apache/hive/service/cli/operation/LogDivertAppender.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5140e73c/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
--
diff --git 
a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java 
b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
index 59dc433..eaf1acb 100644
--- 
a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
+++ 
b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
@@ -164,6 +164,7 @@ public class LogDivertAppender
   // and set the pattern and excludeMatches accordingly.
   if (currentLoggingMode != loggingMode) {
 loggingMode = currentLoggingMode;
+excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
 setCurrentNamePattern(loggingMode);
   }
 



hive git commit: HIVE-12947: SMB join in tez has ClassCastException when container reuse is on (Vikram Dixit K, reviewed by Sergey Shelukhin)

2016-02-01 Thread vikram
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 29347907a -> d20afd342


HIVE-12947: SMB join in tez has ClassCastException when container reuse is on 
(Vikram Dixit K, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d20afd34
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d20afd34
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d20afd34

Branch: refs/heads/branch-2.0
Commit: d20afd3428c7e9a03a778c120501e36b51a389ca
Parents: 2934790
Author: vikram 
Authored: Mon Feb 1 11:52:09 2016 -0800
Committer: vikram 
Committed: Mon Feb 1 11:52:09 2016 -0800

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  22 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  30 +-
 ql/src/test/queries/clientpositive/smb_cache.q  | 120 ++
 .../results/clientpositive/tez/smb_cache.q.out  | 383 +++
 5 files changed, 530 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d20afd34/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index e575eef..9b31441 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -376,6 +376,7 @@ minitez.query.files.shared=acid_globallimit.q,\
 
 
 minitez.query.files=bucket_map_join_tez1.q,\
+  smb_cache.q,\
   bucket_map_join_tez2.q,\
   constprog_dpp.q,\
   dynamic_partition_pruning.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/d20afd34/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 8693200..45bc0fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,15 +20,11 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.Future;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +32,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
-import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -638,13 +633,16 @@ public class CommonMergeJoinOperator extends 
AbstractMapJoinOperator dummyOps =
-((TezContext) (MapredContext.get())).getDummyOpsMap();
-for (Entry connectOp : dummyOps.entrySet()) {
-  if (connectOp.getValue().getChildOperators() == null
-  || connectOp.getValue().getChildOperators().isEmpty()) {
-parentOperators.add(connectOp.getKey(), connectOp.getValue());
-connectOp.getValue().getChildOperators().add(this);
+
+if (parentOperators.size() == 1) {
+  Map dummyOps =
+  ((TezContext) (MapredContext.get())).getDummyOpsMap();
+  for (Entry connectOp : dummyOps.entrySet()) 
{
+if (connectOp.getValue().getChildOperators() == null
+|| connectOp.getValue().getChildOperators().isEmpty()) {
+  parentOperators.add(connectOp.getKey(), connectOp.getValue());
+  connectOp.getValue().getChildOperators().add(this);
+}
   }
 }
 super.initializeLocalWork(hconf);

http://git-wip-us.apache.org/repos/asf/hive/blob/d20afd34/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index a4cf0b1..0584ad8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-i

hive git commit: HIVE-12948 : Update branch-2.0 before cutting the RC (Sergey Shelukhin, reviewed by Ashutosh Chauhan) ADDENDUM

2016-02-01 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 d20afd342 -> 73734a044


HIVE-12948 : Update branch-2.0 before cutting the RC (Sergey Shelukhin, 
reviewed by Ashutosh Chauhan) ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/73734a04
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/73734a04
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/73734a04

Branch: refs/heads/branch-2.0
Commit: 73734a04418dfb5d35c2b8b9a30b1f54e47879c3
Parents: d20afd3
Author: Sergey Shelukhin 
Authored: Mon Feb 1 12:19:24 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 12:19:24 2016 -0800

--
 RELEASE_NOTES.txt | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/73734a04/RELEASE_NOTES.txt
--
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index 9746892..eacea6f 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -802,13 +802,17 @@ Release Notes - Hive - Version 2.0.0
 * [HIVE-12864] - StackOverflowError parsing queries with very large 
predicates
 * [HIVE-12875] - Verify sem.getInputs() and sem.getOutputs()
 * [HIVE-12879] - RowResolver of Semijoin not updated in CalcitePlanner
+* [HIVE-12893] - Sorted dynamic partition does not work if subset of 
partition columns are constant folded
 * [HIVE-12904] - LLAP: deadlock in task scheduling
 * [HIVE-12905] - Issue with mapjoin in tez under certain conditions
 * [HIVE-12911] - PPD might get exercised even when flag is false if CBO is 
on
 * [HIVE-12915] - Tez session pool has concurrency issues during init
 * [HIVE-12920] - LLAP fails to start with java.lang.NumberFormatException
 * [HIVE-12926] - Another synchronization issue with tez/llap session pool 
in hs2
-
+* [HIVE-12931] - Shuffle tokens stay around forever in LLAP
+* [HIVE-12945] - Bucket pruning: bucketing for -ve hashcodes have 
historical issues
+* [HIVE-12947] - SMB join in tez has ClassCastException when container 
reuse is on
+* [HIVE-12964] - 
TestOperationLoggingAPIWithMr,TestOperationLoggingAPIWithTez fail on branch-2.0 
(with Java 7, at least)
 
 
 
@@ -997,6 +1001,7 @@ Release Notes - Hive - Version 2.0.0
 * [HIVE-12436] - Default hive.metastore.schema.verification to true
 * [HIVE-12520] - Fix schema_evol* tests on master
 * [HIVE-12658] - Task rejection by an llap daemon spams the log with 
RejectedExecutionExceptions
+* [HIVE-12934] - Refactor llap module structure to allow for a usable 
client
 
 
 ** Test
@@ -1019,6 +1024,7 @@ Release Notes - Hive - Version 2.0.0
 ** Wish
 * [HIVE-10427] - collect_list() and collect_set() should accept struct 
types as argument
 
+
 Release Notes - Hive - Version 1.2.1
 
 ** Sub-task



hive git commit: HIVE-12947: SMB join in tez has ClassCastException when container reuse is on (Vikram Dixit K, reviewed by Sergey Shelukhin)

2016-02-01 Thread vikram
Repository: hive
Updated Branches:
  refs/heads/master 5140e73ca -> d60e7c03d


HIVE-12947: SMB join in tez has ClassCastException when container reuse is on 
(Vikram Dixit K, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d60e7c03
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d60e7c03
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d60e7c03

Branch: refs/heads/master
Commit: d60e7c03d2a041f1238eb35c77ddb90891d8f999
Parents: 5140e73
Author: vikram 
Authored: Mon Feb 1 12:55:13 2016 -0800
Committer: vikram 
Committed: Mon Feb 1 12:55:13 2016 -0800

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  22 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  30 +-
 ql/src/test/queries/clientpositive/smb_cache.q  | 120 +++
 .../results/clientpositive/tez/smb_cache.q.out  | 347 +++
 5 files changed, 494 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d60e7c03/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index dfd221e..fd2c924 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -376,6 +376,7 @@ minitez.query.files.shared=acid_globallimit.q,\
 
 
 minitez.query.files=bucket_map_join_tez1.q,\
+  smb_cache.q,\
   bucket_map_join_tez2.q,\
   constprog_dpp.q,\
   dynamic_partition_pruning.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/d60e7c03/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 8693200..45bc0fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,15 +20,11 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.Future;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +32,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
-import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -638,13 +633,16 @@ public class CommonMergeJoinOperator extends 
AbstractMapJoinOperator dummyOps =
-((TezContext) (MapredContext.get())).getDummyOpsMap();
-for (Entry connectOp : dummyOps.entrySet()) {
-  if (connectOp.getValue().getChildOperators() == null
-  || connectOp.getValue().getChildOperators().isEmpty()) {
-parentOperators.add(connectOp.getKey(), connectOp.getValue());
-connectOp.getValue().getChildOperators().add(this);
+
+if (parentOperators.size() == 1) {
+  Map dummyOps =
+  ((TezContext) (MapredContext.get())).getDummyOpsMap();
+  for (Entry connectOp : dummyOps.entrySet()) 
{
+if (connectOp.getValue().getChildOperators() == null
+|| connectOp.getValue().getChildOperators().isEmpty()) {
+  parentOperators.add(connectOp.getKey(), connectOp.getValue());
+  connectOp.getValue().getChildOperators().add(this);
+}
   }
 }
 super.initializeLocalWork(hconf);

http://git-wip-us.apache.org/repos/asf/hive/blob/d60e7c03/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index a4cf0b1..0584ad8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import j

[1/2] hive git commit: HIVE-12448. Change to tracking of dag status via dagIdentifier instead of dag name. (Siddharth Seth, reviewed by Sergey Shelukhin) ADDENDUM

2016-02-01 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 73734a044 -> b45f4c69f


HIVE-12448. Change to tracking of dag status via dagIdentifier instead of dag 
name. (Siddharth Seth, reviewed by Sergey Shelukhin) ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e989bcd9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e989bcd9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e989bcd9

Branch: refs/heads/branch-2.0
Commit: e989bcd96080729a45215ddc17ed3a90e6a8dd80
Parents: 73734a0
Author: Sergey Shelukhin 
Authored: Mon Feb 1 12:36:24 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 12:36:24 2016 -0800

--
 .../llap/daemon/impl/TestQueryIdentifier.java | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e989bcd9/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
--
diff --git 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
index 39a3865..a035573 100644
--- 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
+++ 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hive.llap.daemon.impl;
 
 import static org.junit.Assert.assertEquals;



[2/2] hive git commit: HIVE-12948 : Update branch-2.0 before cutting the RC (Sergey Shelukhin, reviewed by Ashutosh Chauhan) ADDENDUM2

2016-02-01 Thread sershe
HIVE-12948 : Update branch-2.0 before cutting the RC (Sergey Shelukhin, 
reviewed by Ashutosh Chauhan) ADDENDUM2


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b45f4c69
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b45f4c69
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b45f4c69

Branch: refs/heads/branch-2.0
Commit: b45f4c69f0c8853d9faea2dac8523517d28391d3
Parents: e989bcd
Author: Sergey Shelukhin 
Authored: Mon Feb 1 12:42:05 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 12:42:05 2016 -0800

--
 accumulo-handler/pom.xml| 2 +-
 ant/pom.xml | 2 +-
 beeline/pom.xml | 2 +-
 cli/pom.xml | 2 +-
 common/pom.xml  | 2 +-
 contrib/pom.xml | 2 +-
 hbase-handler/pom.xml   | 2 +-
 hcatalog/core/pom.xml   | 2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml   | 2 +-
 hcatalog/pom.xml| 2 +-
 hcatalog/server-extensions/pom.xml  | 2 +-
 hcatalog/streaming/pom.xml  | 2 +-
 hcatalog/webhcat/java-client/pom.xml| 2 +-
 hcatalog/webhcat/svr/pom.xml| 2 +-
 hplsql/pom.xml  | 2 +-
 hwi/pom.xml | 2 +-
 itests/custom-serde/pom.xml | 2 +-
 itests/custom-udfs/pom.xml  | 2 +-
 itests/custom-udfs/udf-classloader-udf1/pom.xml | 2 +-
 itests/custom-udfs/udf-classloader-udf2/pom.xml | 2 +-
 itests/custom-udfs/udf-classloader-util/pom.xml | 2 +-
 itests/hcatalog-unit/pom.xml| 2 +-
 itests/hive-jmh/pom.xml | 2 +-
 itests/hive-minikdc/pom.xml | 2 +-
 itests/hive-unit-hadoop2/pom.xml| 2 +-
 itests/hive-unit/pom.xml| 2 +-
 itests/pom.xml  | 2 +-
 itests/qtest-accumulo/pom.xml   | 2 +-
 itests/qtest-spark/pom.xml  | 2 +-
 itests/qtest/pom.xml| 2 +-
 itests/test-serde/pom.xml   | 2 +-
 itests/util/pom.xml | 2 +-
 jdbc/pom.xml| 2 +-
 llap-client/pom.xml | 2 +-
 llap-common/pom.xml | 2 +-
 llap-server/pom.xml | 2 +-
 llap-tez/pom.xml| 2 +-
 metastore/pom.xml   | 2 +-
 odbc/pom.xml| 2 +-
 orc/pom.xml | 2 +-
 packaging/pom.xml   | 2 +-
 pom.xml | 2 +-
 ql/pom.xml  | 2 +-
 serde/pom.xml   | 2 +-
 service/pom.xml | 2 +-
 shims/0.23/pom.xml  | 2 +-
 shims/aggregator/pom.xml| 2 +-
 shims/common/pom.xml| 2 +-
 shims/pom.xml   | 2 +-
 shims/scheduler/pom.xml | 2 +-
 spark-client/pom.xml| 4 ++--
 storage-api/pom.xml | 2 +-
 testutils/pom.xml   | 2 +-
 53 files changed, 54 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b45f4c69/accumulo-handler/pom.xml
--
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 9d5185b..91ba4af 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.0.0-SNAPSHOT
+2.0.0
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b45f4c69/ant/pom.xml
--
diff --git a/ant/pom.xml b/ant/pom.xml
index 9706572..643bb00 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.0.0-SNAPSHOT
+2.0.0
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b45f4c69/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index a99fd54..63ab897 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.0.0-SNAPSHOT
+2.0.0
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b45f4c69/cli/pom.xml
---

[hive] Git Push Summary

2016-02-01 Thread sershe
Repository: hive
Updated Tags:  refs/tags/release-2.0.0-rc0 [created] f1a8a2a09


hive git commit: HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be cast to org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, reviewed by Sergey Shelukhin)

2016-02-01 Thread vikram
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 d649379f3 -> 0b9b1d84f


HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be cast to 
org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, reviewed by 
Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b9b1d84
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b9b1d84
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b9b1d84

Branch: refs/heads/branch-1.2
Commit: 0b9b1d84f057e7fa76888ef8a9f94e753a0291df
Parents: d649379
Author: vikram 
Authored: Mon Feb 1 13:39:45 2016 -0800
Committer: vikram 
Committed: Mon Feb 1 13:39:45 2016 -0800

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  19 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  25 +-
 ql/src/test/queries/clientpositive/smb_cache.q  | 120 ++
 .../results/clientpositive/tez/smb_cache.q.out  | 413 +++
 5 files changed, 559 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0b9b1d84/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 0b44c97..d2f9b58 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -317,6 +317,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
 
 
 minitez.query.files=bucket_map_join_tez1.q,\
+  smb_cache.q,\
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/0b9b1d84/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 296a92d..d69d82e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -35,7 +34,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
-import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -633,13 +631,16 @@ public class CommonMergeJoinOperator extends 
AbstractMapJoinOperator dummyOps =
-((TezContext) (MapredContext.get())).getDummyOpsMap();
-for (Entry connectOp : dummyOps.entrySet()) {
-  if (connectOp.getValue().getChildOperators() == null
-  || connectOp.getValue().getChildOperators().isEmpty()) {
-parentOperators.add(connectOp.getKey(), connectOp.getValue());
-connectOp.getValue().getChildOperators().add(this);
+
+if (parentOperators.size() == 1) {
+  Map dummyOps =
+  ((TezContext) (MapredContext.get())).getDummyOpsMap();
+  for (Entry connectOp : dummyOps.entrySet()) 
{
+if (connectOp.getValue().getChildOperators() == null
+|| connectOp.getValue().getChildOperators().isEmpty()) {
+  parentOperators.add(connectOp.getKey(), connectOp.getValue());
+  connectOp.getValue().getChildOperators().add(this);
+}
   }
 }
 super.initializeLocalWork(hconf);

http://git-wip-us.apache.org/repos/asf/hive/blob/0b9b1d84/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index c6afad6..8b69e3c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -154,6 +154,7 @@ public class MapRecordProcessor extends RecordProcessor {
   mapOp.setExecContext(execContext);
 
   connectOps.clear();
+  boolean fromCache = false;
   if (mergeWorkList != null) {
 MapOperator mergeMapOp = null;
 for (BaseWork me

hive git commit: HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be cast to org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, reviewed by Sergey Shelukhin)

2016-02-01 Thread vikram
Repository: hive
Updated Branches:
  refs/heads/branch-1 e54f28ee0 -> 556806984


HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be cast to 
org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, reviewed by 
Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/55680698
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/55680698
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/55680698

Branch: refs/heads/branch-1
Commit: 556806984e97f3ace5adb2866262eabfd9959867
Parents: e54f28e
Author: vikram 
Authored: Mon Feb 1 13:39:45 2016 -0800
Committer: vikram 
Committed: Mon Feb 1 13:40:57 2016 -0800

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  19 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  25 +-
 ql/src/test/queries/clientpositive/smb_cache.q  | 120 ++
 .../results/clientpositive/tez/smb_cache.q.out  | 413 +++
 5 files changed, 559 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/55680698/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 290cff2..f15de97 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -348,6 +348,7 @@ minitez.query.files.shared=acid_globallimit.q,\
 
 
 minitez.query.files=bucket_map_join_tez1.q,\
+  smb_cache.q,\
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/55680698/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 0a77d31..c0fe670 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -35,7 +34,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
-import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -633,13 +631,16 @@ public class CommonMergeJoinOperator extends 
AbstractMapJoinOperator dummyOps =
-((TezContext) (MapredContext.get())).getDummyOpsMap();
-for (Entry connectOp : dummyOps.entrySet()) {
-  if (connectOp.getValue().getChildOperators() == null
-  || connectOp.getValue().getChildOperators().isEmpty()) {
-parentOperators.add(connectOp.getKey(), connectOp.getValue());
-connectOp.getValue().getChildOperators().add(this);
+
+if (parentOperators.size() == 1) {
+  Map dummyOps =
+  ((TezContext) (MapredContext.get())).getDummyOpsMap();
+  for (Entry connectOp : dummyOps.entrySet()) 
{
+if (connectOp.getValue().getChildOperators() == null
+|| connectOp.getValue().getChildOperators().isEmpty()) {
+  parentOperators.add(connectOp.getKey(), connectOp.getValue());
+  connectOp.getValue().getChildOperators().add(this);
+}
   }
 }
 super.initializeLocalWork(hconf);

http://git-wip-us.apache.org/repos/asf/hive/blob/55680698/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index c6afad6..8b69e3c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -154,6 +154,7 @@ public class MapRecordProcessor extends RecordProcessor {
   mapOp.setExecContext(execContext);
 
   connectOps.clear();
+  boolean fromCache = false;
   if (mergeWorkList != null) {
 MapOperator mergeMapOp = null;
 for (BaseWork mergeWo

hive git commit: HIVE-12448. Change to tracking of dag status via dagIdentifier instead of dag name. (Siddharth Seth, reviewed by Sergey Shelukhin) ADDENDUM

2016-02-01 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master d60e7c03d -> 37026abd8


HIVE-12448. Change to tracking of dag status via dagIdentifier instead of dag 
name. (Siddharth Seth, reviewed by Sergey Shelukhin) ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37026abd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37026abd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37026abd

Branch: refs/heads/master
Commit: 37026abd869e18a447d1d9985bbeb3de4331f804
Parents: d60e7c0
Author: Sergey Shelukhin 
Authored: Mon Feb 1 12:36:24 2016 -0800
Committer: Sergey Shelukhin 
Committed: Mon Feb 1 15:24:33 2016 -0800

--
 .../llap/daemon/impl/TestQueryIdentifier.java | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/37026abd/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
--
diff --git 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
index 39a3865..a035573 100644
--- 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
+++ 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hive.llap.daemon.impl;
 
 import static org.junit.Assert.assertEquals;



hive git commit: HIVE-12966 : Change some ZooKeeperHiveLockManager logs to debug (Mohit Sabharwal via Ashutosh Chauhan)

2016-02-01 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 37026abd8 -> ecf45b9ff


HIVE-12966 : Change some ZooKeeperHiveLockManager logs to debug (Mohit 
Sabharwal via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ecf45b9f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ecf45b9f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ecf45b9f

Branch: refs/heads/master
Commit: ecf45b9ffdb424234b6ae8fb8ceea4ee30c3a0d4
Parents: 37026ab
Author: Mohit Sabharwal 
Authored: Fri Jan 29 14:25:00 2016 -0800
Committer: Ashutosh Chauhan 
Committed: Mon Feb 1 22:40:57 2016 -0800

--
 .../hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ecf45b9f/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
index e10061b..14d0ef4 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
@@ -217,7 +217,8 @@ public class ZooKeeperHiveLockManager implements 
HiveLockManager {
   for (int pos = len-1; pos >= 0; pos--) {
 HiveLock hiveLock = hiveLocks.get(pos);
 try {
-  LOG.info(" about to release lock for " + 
hiveLock.getHiveLockObject().getName());
+  LOG.debug("About to release lock for {}",
+  hiveLock.getHiveLockObject().getName());
   unlock(hiveLock);
 } catch (LockException e) {
   // The lock may have been released. Ignore and continue
@@ -263,7 +264,8 @@ public class ZooKeeperHiveLockManager implements 
HiveLockManager {
 
   private ZooKeeperHiveLock lock (HiveLockObject key, HiveLockMode mode,
   boolean keepAlive, boolean parentCreated) throws LockException {
-LOG.info("Acquiring lock for " + key.getName() + " with mode " + 
key.getData().getLockMode());
+LOG.debug("Acquiring lock for {} with mode {}", key.getName(),
+key.getData().getLockMode());
 
 int tryNum = 0;
 ZooKeeperHiveLock ret = null;



hive git commit: HIVE-12834 : Fix to accept the arrow keys in BeeLine CLI (Shinichi Yamashita via Ashutosh Chauhan)

2016-02-01 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master ecf45b9ff -> cd9185cc4


HIVE-12834 : Fix to accept the arrow keys in BeeLine CLI (Shinichi Yamashita 
via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cd9185cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cd9185cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cd9185cc

Branch: refs/heads/master
Commit: cd9185cc47adb0aa2da4596841c9d1a6379ce16a
Parents: ecf45b9
Author: Shinichi Yamashita 
Authored: Sat Jan 9 20:00:00 2016 -0800
Committer: Ashutosh Chauhan 
Committed: Mon Feb 1 22:46:38 2016 -0800

--
 bin/beeline | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cd9185cc/bin/beeline
--
diff --git a/bin/beeline b/bin/beeline
index e88ae4a..4360ae0 100644
--- a/bin/beeline
+++ b/bin/beeline
@@ -28,7 +28,7 @@ fi
 export HADOOP_USER_CLASSPATH_FIRST=true
 
 # If process is backgrounded, don't change terminal settings
-if [[ ! $(ps -o stat= -p $$) =~ + ]]; then
+if [[ ! $(ps -o stat= -p $$) =~ "+" ]]; then
   export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS 
-Djline.terminal=jline.UnsupportedTerminal"
 fi