http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/server/RemoteServiceSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/RemoteServiceSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/RemoteServiceSet.java
index 96e9d8e..72eb306 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/RemoteServiceSet.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/RemoteServiceSet.java
@@ -21,9 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 
 import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.exec.cache.DistributedCache;
-import org.apache.drill.exec.cache.infinispan.ICache;
-import org.apache.drill.exec.cache.local.LocalCache;
 import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.coord.local.LocalClusterCoordinator;
 import org.apache.drill.exec.memory.BufferAllocator;
@@ -31,18 +28,13 @@ import org.apache.drill.exec.memory.BufferAllocator;
 public class RemoteServiceSet implements Closeable {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RemoteServiceSet.class);
 
-  private final DistributedCache cache;
   private final ClusterCoordinator coordinator;
 
-  public RemoteServiceSet(DistributedCache cache, ClusterCoordinator coordinator) {
+  public RemoteServiceSet(ClusterCoordinator coordinator) {
     super();
-    this.cache = cache;
     this.coordinator = coordinator;
   }
 
-  public DistributedCache getCache() {
-    return cache;
-  }
 
   public ClusterCoordinator getCoordinator() {
     return coordinator;
@@ -50,24 +42,15 @@ public class RemoteServiceSet implements Closeable {
 
   @Override
   public void close() throws IOException {
-    try {
-      cache.close();
-    } catch(Exception e) {
-      if (e instanceof IOException) {
-        throw (IOException) e;
-      }
-      throw new IOException("Failure while closing cache", e);
-    }
     coordinator.close();
   }
 
   public static RemoteServiceSet getLocalServiceSet() {
-    return new RemoteServiceSet(new LocalCache(), new LocalClusterCoordinator());
+    return new RemoteServiceSet(new LocalClusterCoordinator());
   }
 
   public static RemoteServiceSet getServiceSetWithFullCache(DrillConfig config, BufferAllocator allocator) throws Exception{
-    ICache c = new ICache(config, allocator, true);
-    return new RemoteServiceSet(c, new LocalClusterCoordinator());
+    return new RemoteServiceSet(new LocalClusterCoordinator());
   }
 
 }
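
A note on the API change above: RemoteServiceSet now carries only a ClusterCoordinator, and both factory methods hand back a cache-free instance. The following is an illustrative sketch only (not part of this commit), using just the methods that survive the change: getLocalServiceSet(), getCoordinator() and close().

import java.io.IOException;

import org.apache.drill.exec.server.RemoteServiceSet;

public class LocalServiceSetExample {
  public static void main(String[] args) throws IOException {
    // Purely local service set; after this change it only wires up a LocalClusterCoordinator.
    RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
    try {
      // Hand the coordinator to whatever embedded component needs it.
      System.out.println("coordinator: " + serviceSet.getCoordinator());
    } finally {
      serviceSet.close();
    }
  }
}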

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 86b0a23..e802b44 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -25,7 +25,6 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.commons.collections.IteratorUtils;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.cache.DistributedCache.CacheConfig;
 import org.apache.drill.exec.compile.QueryClassLoader;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.server.options.OptionValue.OptionType;
@@ -40,12 +39,6 @@ public class SystemOptionManager implements OptionManager {
 
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SystemOptionManager.class);
 
-  public static final CacheConfig<String, OptionValue> OPTION_CACHE = CacheConfig //
-      .newBuilder(OptionValue.class) //
-      .name("sys.options") //
-      .jackson()
-      .build();
-
   private final OptionValidator[] VALIDATORS = {
       PlannerSettings.EXCHANGE,
       PlannerSettings.HASHAGG,

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/DrillbitIterator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/DrillbitIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/DrillbitIterator.java
index 844fd68..67d7cf9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/DrillbitIterator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/DrillbitIterator.java
@@ -26,9 +26,11 @@ public class DrillbitIterator implements Iterator<Object> {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillbitIterator.class);
 
   private Iterator<DrillbitEndpoint> endpoints;
+  private DrillbitEndpoint current;
 
   public DrillbitIterator(FragmentContext c) {
     this.endpoints = c.getDrillbitContext().getBits().iterator();
+    this.current = c.getIdentity();
   }
 
   public static class DrillbitInstance {
@@ -36,6 +38,7 @@ public class DrillbitIterator implements Iterator<Object> {
     public int user_port;
     public int control_port;
     public int data_port;
+    public boolean current;
   }
 
   @Override
@@ -47,6 +50,7 @@ public class DrillbitIterator implements Iterator<Object> {
   public Object next() {
     DrillbitEndpoint ep = endpoints.next();
     DrillbitInstance i = new DrillbitInstance();
+    i.current = ep.equals(current);
     i.host = ep.getAddress();
     i.user_port = ep.getUserPort();
     i.control_port = ep.getControlPort();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
index 4301f12..0bf2156 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
@@ -24,7 +24,8 @@ import org.eigenbase.reltype.RelDataTypeFactory;
 
 public enum SystemTable {
   OPTION("options", OptionValue.class),
-  DRILLBITS("drillbits", DrillbitIterator.DrillbitInstance.class)
+  DRILLBITS("drillbits", DrillbitIterator.DrillbitInstance.class),
+  VERSION("version", VersionIterator.VersionInfo.class)
   ;
 
   private final PojoDataType type;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTablePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTablePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTablePlugin.java
index 743ab53..5a25724 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTablePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTablePlugin.java
@@ -66,6 +66,8 @@ public class SystemTablePlugin extends AbstractStoragePlugin{
 
   public Iterator<Object> getRecordIterator(FragmentContext context, SystemTable table){
     switch(table){
+    case VERSION:
+      return new VersionIterator();
     case DRILLBITS:
       return new DrillbitIterator(context);
     case OPTION:
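
Taken together with the SystemTable and VersionIterator changes, this is all the wiring a new system table needs: a POJO whose public fields become columns, a one-shot Iterator<Object> over those POJOs, an enum entry naming the table, and a case in getRecordIterator(). A standalone, self-contained sketch of that pattern; the "memory" table and all names below are hypothetical and purely illustrative:

import java.util.Collections;
import java.util.Iterator;

public class SysTableSketch {

  // 1. POJO: public fields become the table's columns (compare DrillbitInstance / VersionInfo).
  public static class MemoryInfo {
    public long heap_max = Runtime.getRuntime().maxMemory();
    public long heap_total = Runtime.getRuntime().totalMemory();
    public long heap_free = Runtime.getRuntime().freeMemory();
  }

  // 2. Enum entry mapping a table name to its row class (compare SystemTable.VERSION).
  public enum Table {
    MEMORY("memory", MemoryInfo.class);

    public final String tableName;
    public final Class<?> pojoClass;

    Table(String tableName, Class<?> pojoClass) {
      this.tableName = tableName;
      this.pojoClass = pojoClass;
    }
  }

  // 3. Plugin-side dispatch (compare SystemTablePlugin.getRecordIterator).
  public static Iterator<Object> getRecordIterator(Table table) {
    switch (table) {
    case MEMORY:
      return Collections.<Object>singletonList(new MemoryInfo()).iterator();
    default:
      throw new UnsupportedOperationException("Unknown table: " + table);
    }
  }

  public static void main(String[] args) {
    Iterator<Object> it = getRecordIterator(Table.MEMORY);
    while (it.hasNext()) {
      System.out.println(((MemoryInfo) it.next()).heap_max);
    }
  }
}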

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/VersionIterator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/VersionIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/VersionIterator.java
new file mode 100644
index 0000000..ffc740a
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/VersionIterator.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.sys;
+
+import java.io.IOException;
+import java.net.URL;
+import java.util.Iterator;
+import java.util.Properties;
+
+import com.google.common.io.Resources;
+
+public class VersionIterator implements Iterator<Object>{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(VersionIterator.class);
+
+  public boolean beforeFirst = true;
+
+  public static class VersionInfo {
+    public String commit_id = "Unknown";
+    public String commit_message = "";
+    public String commit_time = "";
+    public String build_email = "Unknown";
+    public String build_time = "";
+
+    public VersionInfo(){
+      URL u = Resources.getResource("git.properties");
+      if(u != null){
+        try {
+          Properties p = new Properties();
+          p.load(Resources.newInputStreamSupplier(u).getInput());
+          commit_id = p.getProperty("git.commit.id");
+          build_email = p.getProperty("git.build.user.email");
+          commit_time = p.getProperty("git.commit.time");
+          build_time = p.getProperty("git.build.time");
+          commit_message = p.getProperty("git.commit.message.short");
+
+        } catch (IOException e) {
+          logger.warn("Failure while trying to load \"git.properties\" file.", e);
+        }
+      }
+
+    }
+  }
+  @Override
+  public boolean hasNext() {
+    return beforeFirst;
+  }
+
+  @Override
+  public Object next() {
+    if(!beforeFirst){
+      throw new IllegalStateException();
+    }
+    beforeFirst = false;
+    return new VersionInfo();
+  }
+
+  @Override
+  public void remove() {
+    throw new UnsupportedOperationException();
+  }
+
+
+}
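
The VersionInfo constructor above only depends on a git.properties file being available on the classpath at build time. A minimal JDK-only sketch of the same lookup, for illustration (the file name and property keys are taken from the code above; everything else here is assumed):

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class GitPropertiesExample {
  public static void main(String[] args) throws IOException {
    Properties p = new Properties();
    // git.properties is expected at the classpath root, as in VersionIterator.
    try (InputStream in = GitPropertiesExample.class.getResourceAsStream("/git.properties")) {
      if (in == null) {
        System.out.println("git.properties not found on classpath");
        return;
      }
      p.load(in);
    }
    // Same keys VersionInfo reads; a missing key simply comes back null.
    System.out.println("commit id:   " + p.getProperty("git.commit.id"));
    System.out.println("commit time: " + p.getProperty("git.commit.time"));
    System.out.println("built by:    " + p.getProperty("git.build.user.email"));
    System.out.println("build time:  " + p.getProperty("git.build.time"));
    System.out.println("message:     " + p.getProperty("git.commit.message.short"));
  }
}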

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/ErrorHelper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/ErrorHelper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/ErrorHelper.java
index 51b4e32..6a9cab5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/ErrorHelper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/ErrorHelper.java
@@ -17,68 +17,207 @@
  */
 package org.apache.drill.exec.work;
 
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.util.UUID;
+import java.util.regex.Pattern;
 
-import org.apache.drill.exec.planner.sql.parser.impl.ParseException;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
-import org.eigenbase.sql.parser.SqlParseException;
+import org.apache.drill.exec.proto.UserBitShared.ExceptionWrapper;
+import org.apache.drill.exec.proto.UserBitShared.StackTraceElementWrapper;
+import org.apache.drill.exec.rpc.RemoteRpcException;
 import org.slf4j.Logger;
 
 
 public class ErrorHelper {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ErrorHelper.class);
 
-//  public static DrillPBError logAndConvertError(DrillbitEndpoint endpoint, String message, Throwable t, Logger logger) {
-//    return logAndConvertError(endpoint, message, t, logger, true);
-//  }
+  final static Pattern IGNORE= Pattern.compile("^(sun|com\\.sun|java).*");
 
-  public static DrillPBError logAndConvertError(DrillbitEndpoint endpoint, String message, Throwable t, Logger logger,
-                                                boolean verbose) {
+  /**
+   * Manages message conversion before returning to user.  If the exception is a remote rpc exception, will simply return user friendly message.  Otherwise, will log and return.
+   * TODO: this should really be done client side but we don't currently have any way to maintain session state on the client.
+   *
+   * @param endpoint
+   * @param message
+   * @param t
+   * @param logger
+   * @param verbose
+   * @return
+   */
+  public static DrillPBError logAndConvertMessageError(DrillbitEndpoint endpoint, String message, Throwable t, Logger logger, boolean verbose) {
+
+    DrillPBError baseError = t instanceof RemoteRpcException ? ((RemoteRpcException) t).getRemoteError() : logAndConvertError(endpoint, message, t, logger);
+    String userMessage = getErrorMessage(baseError, verbose);
+    return DrillPBError.newBuilder() //
+      .setEndpoint(baseError.getEndpoint()) //
+      .setErrorId(baseError.getErrorId()) //
+      .setMessage(userMessage) //
+      .build();
+  }
+
+  public static DrillPBError logAndConvertError(DrillbitEndpoint endpoint, String message, Throwable t, Logger logger) {
     String id = UUID.randomUUID().toString();
     DrillPBError.Builder builder = DrillPBError.newBuilder();
     builder.setEndpoint(endpoint);
     builder.setErrorId(id);
-    StringBuilder sb = new StringBuilder();
-    if (message != null) {
-      sb.append(message).append(" ");
+    builder.setMessage(message);
+    builder.setException(getWrapper(t));
+
+    // record the error to the log for later reference.
+    logger.error("Error {}: {}", id, message, t);
+
+    return builder.build();
+  }
+
+  public static ExceptionWrapper getWrapper(Throwable ex){
+    return getWrapperBuilder(ex).build();
+  }
+
+  public static ExceptionWrapper.Builder getWrapperBuilder(Throwable ex){
+    return getWrapperBuilder(ex, false);
+  }
+
+  public static ExceptionWrapper.Builder getWrapperBuilder(Throwable ex, boolean includeAllStack){
+
+
+
+    ExceptionWrapper.Builder ew = ExceptionWrapper.newBuilder();
+    if(ex.getMessage() != null) {
+      ew.setMessage(ex.getMessage());
+    }
+    ew.setExceptionClass(ex.getClass().getCanonicalName());
+    boolean isHidden = false;
+    StackTraceElementWrapper[] wrappers = new StackTraceElementWrapper[ex.getStackTrace().length];
+    for(int i = 0; i < wrappers.length; i++){
+      StackTraceElement ele = ex.getStackTrace()[i];
+      if(include(ele, includeAllStack)){
+        if(isHidden){
+          isHidden = false;
+        }
+        ew.addStackTrace(getSTWrapper(ele));
+      }else{
+        if(!isHidden){
+          isHidden = true;
+          ew.addStackTrace(getEmptyST());
+        }
+      }
+
+    }
+
+    if(ex.getCause() != null && ex.getCause() != ex){
+      ew.setCause(getWrapper(ex.getCause()));
+    }
+    return ew;
+  }
+
+  private static boolean include(StackTraceElement ele, boolean includeAllStack){
+    if(includeAllStack) {
+      return true;
     }
-    sb.append("%s ").append("[").append(id).append("]");
-    if (verbose) {
-      sb.append("\n")
-        .append("Node details: ")
-        .append(endpoint.getAddress())
-        .append(":").append(endpoint.getControlPort())
-        .append("/").append(endpoint.getDataPort());
+    return !(IGNORE.matcher(ele.getClassName()).matches());
+  }
+
+  private static StackTraceElementWrapper.Builder getEmptyST(){
+    StackTraceElementWrapper.Builder w = StackTraceElementWrapper.newBuilder();
+    w.setClassName("...");
+    w.setIsNativeMethod(false);
+    w.setLineNumber(0);
+    w.setMethodName("...");
+    return w;
+  }
+  public static StackTraceElementWrapper.Builder getSTWrapper(StackTraceElement ele){
+    StackTraceElementWrapper.Builder w = StackTraceElementWrapper.newBuilder();
+    w.setClassName(ele.getClassName());
+    if(ele.getFileName() != null) {
+      w.setFileName(ele.getFileName());
     }
+    w.setIsNativeMethod(ele.isNativeMethod());
+    w.setLineNumber(ele.getLineNumber());
+    w.setMethodName(ele.getMethodName());
+    return w;
+  }
+
+
+  public static String getErrorMessage(final DrillPBError error, final boolean verbose) {
+
+    String finalMessage = null;
+    ExceptionWrapper ex = error.getException();
+    StringBuilder sb = new StringBuilder();
+
+
+
+    sb //
+      .append("[ ") //
+      .append(error.getErrorId()) //
+      .append(" on ")
+      .append(error.getEndpoint().getAddress())
+      .append(":").append(error.getEndpoint().getUserPort())
+      .append(" ]\n");
+
+    boolean cause = false;
+    while(ex != null){
+
+      if(ex.hasMessage()){
+        finalMessage = ex.getMessage();
+      }
+
+      if(verbose){
+        sb.append("  ");
+
+        if(cause){
+          sb.append("Caused By ");
+        }
+
+        sb.append("(");
+        sb.append(ex.getExceptionClass());
+        sb.append(") ");
+        sb.append(ex.getMessage());
+        sb.append("\n");
+        for(int i = 0; i < ex.getStackTraceCount(); i++){
+          StackTraceElementWrapper st = ex.getStackTrace(i);
+          sb.append("    ");
+          sb.append(st.getClassName());
+          sb.append('.');
+          sb.append(st.getMethodName());
+          sb.append("():");
+          sb.append(st.getLineNumber());
+          sb.append("\n");
+        }
+        cause = true;
+      }
+
+      ex = ex.hasCause() ? ex.getCause() : null;
+
 
-    if (verbose) {
-      StringWriter errors = new StringWriter();
-      errors.write("\n");
-      t.printStackTrace(new PrintWriter(errors));
-      sb.append(errors);
     }
 
-    Throwable original = t;
-    Throwable rootCause = null;
-    while (true) {
-      rootCause = t;
-      if (t.getCause() == null || t.getCause() == t
-        || (t instanceof SqlParseException && t.getCause() instanceof ParseException)) {
-        break;
+    StringBuilder msg = new StringBuilder();
+
+    if (error.hasMessage()){
+      msg.append(error.getMessage());
+      if(finalMessage != null){
+        msg.append(", ");
+        msg.append(finalMessage);
+        msg.append(' ');
       }
-      t = t.getCause();
+    }else if(finalMessage != null){
+      msg.append(finalMessage);
+      msg.append(' ');
+    }else{
+      msg.append("Error ");
     }
 
-    String finalMsg = rootCause.getMessage() == null ? original.getMessage() : rootCause.getMessage();
-    builder.setMessage(String.format(sb.toString(), finalMsg));
-    builder.setErrorType(0);
+    msg.append(sb);
 
-    // record the error to the log for later reference.
-    logger.error("Error {}: {}", id, message, t);
+    return msg.toString();
+  }
+
+  public static void main(String[] args ){
+    DrillPBError e = logAndConvertError(DrillbitEndpoint.newBuilder().setAddress("host1").setControlPort(1234).build(), "RpcFailure", new Exception("Excep 1", new Exception("excep2")), logger);
+    System.out.println(getErrorMessage(e, false));
+    System.out.println("\n\n\n");
+    System.out.println(getErrorMessage(e, true));
 
-    return builder.build();
   }
 
 }
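
The include()/isHidden logic above collapses runs of uninteresting JDK frames (sun.*, com.sun.*, java.*) into a single "..." placeholder when building the wrapped stack trace. A standalone sketch of that filtering idea, using only JDK classes (the IGNORE pattern is copied from the diff; the rest is illustrative):

import java.util.regex.Pattern;

public class StackTraceFilterExample {
  static final Pattern IGNORE = Pattern.compile("^(sun|com\\.sun|java).*");

  public static void main(String[] args) {
    Exception ex = new Exception("boom");
    boolean hidden = false;
    for (StackTraceElement ele : ex.getStackTrace()) {
      if (!IGNORE.matcher(ele.getClassName()).matches()) {
        // interesting frame: print it and reset the "hidden" run
        hidden = false;
        System.out.println("    " + ele.getClassName() + "." + ele.getMethodName()
            + "():" + ele.getLineNumber());
      } else if (!hidden) {
        // first ignored frame of a run: emit one placeholder, then skip the rest
        hidden = true;
        System.out.println("    ...");
      }
    }
  }
}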

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
index 0407361..d85abd5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
@@ -28,7 +28,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.drill.exec.cache.DistributedCache;
 import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
@@ -91,9 +90,9 @@ public class WorkManager implements Closeable {
     this.dataHandler = new DataResponseHandlerImpl(bee);
   }
 
-  public void start(DrillbitEndpoint endpoint, DistributedCache cache, Controller controller,
+  public void start(DrillbitEndpoint endpoint, Controller controller,
       DataConnectionCreator data, ClusterCoordinator coord, PStoreProvider provider) {
-    this.dContext = new DrillbitContext(endpoint, bContext, coord, controller, data, cache, workBus, provider);
+    this.dContext = new DrillbitContext(endpoint, bContext, coord, controller, data, workBus, provider);
     // executor = Executors.newFixedThreadPool(dContext.getConfig().getInt(ExecConstants.EXECUTOR_THREADS)
     executor = Executors.newCachedThreadPool(new NamedThreadFactory("WorkManager-"));
     eventThread.start();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlHandlerImpl.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlHandlerImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlHandlerImpl.java
index 0ac606c..63d8c71 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlHandlerImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlHandlerImpl.java
@@ -22,11 +22,11 @@ import io.netty.buffer.ByteBuf;
 
 import java.io.IOException;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.proto.BitControl.FinishedReceiver;
 import org.apache.drill.exec.proto.BitControl.FragmentStatus;
+import org.apache.drill.exec.proto.BitControl.InitializeFragments;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.BitControl.RpcType;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
@@ -38,6 +38,7 @@ import org.apache.drill.exec.rpc.Acks;
 import org.apache.drill.exec.rpc.Response;
 import org.apache.drill.exec.rpc.RpcConstants;
 import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.UserRpcException;
 import org.apache.drill.exec.rpc.control.ControlConnection;
 import org.apache.drill.exec.rpc.control.ControlTunnel;
 import org.apache.drill.exec.rpc.data.DataRpcConfig;
@@ -46,6 +47,7 @@ import org.apache.drill.exec.work.foreman.Foreman;
 import org.apache.drill.exec.work.foreman.QueryStatus;
 import org.apache.drill.exec.work.fragment.FragmentExecutor;
 import org.apache.drill.exec.work.fragment.FragmentManager;
+import org.apache.drill.exec.work.fragment.NonRootFragmentManager;
 import org.apache.drill.exec.work.fragment.NonRootStatusReporter;
 
 public class ControlHandlerImpl implements ControlMessageHandler {
@@ -81,16 +83,12 @@ public class ControlHandlerImpl implements ControlMessageHandler {
       // TODO: Support a type of message that has no response.
       return DataRpcConfig.OK;
 
-    case RpcType.REQ_INIATILIZE_FRAGMENT_VALUE:
-      PlanFragment fragment = get(pBody, PlanFragment.PARSER);
-      try {
-        startNewRemoteFragment(fragment);
-        return DataRpcConfig.OK;
-
-      } catch (ExecutionSetupException e) {
-        logger.error("Failure while attempting to start remote fragment.", fragment);
-        return new Response(RpcType.ACK, Acks.FAIL);
+    case RpcType.REQ_INIATILIZE_FRAGMENTS_VALUE:
+      InitializeFragments fragments = get(pBody, InitializeFragments.PARSER);
+      for(int i =0; i < fragments.getFragmentCount(); i++){
+        startNewRemoteFragment(fragments.getFragment(i));
       }
+      return DataRpcConfig.OK;
 
     case RpcType.REQ_QUERY_STATUS_VALUE:
       QueryId queryId = get(pBody, QueryId.PARSER);
@@ -113,25 +111,29 @@ public class ControlHandlerImpl implements ControlMessageHandler {
 
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.drill.exec.work.batch.BitComHandler#startNewRemoteFragment(org.apache.drill.exec.proto.ExecProtos.PlanFragment)
-   */
   @Override
-  public void startNewRemoteFragment(PlanFragment fragment) throws ExecutionSetupException{
+  public void startNewRemoteFragment(PlanFragment fragment) throws UserRpcException {
     logger.debug("Received remote fragment start instruction", fragment);
-    FragmentContext context = new FragmentContext(bee.getContext(), fragment, null, bee.getContext().getFunctionImplementationRegistry());
-    ControlTunnel tunnel = bee.getContext().getController().getTunnel(fragment.getForeman());
 
-    NonRootStatusReporter listener = new NonRootStatusReporter(context, tunnel);
     try {
-      FragmentRoot rootOperator = bee.getContext().getPlanReader().readFragmentOperator(fragment.getFragmentJson());
-      FragmentExecutor fr = new FragmentExecutor(context, bee, rootOperator, listener);
-      bee.addFragmentRunner(fr);
+      // we either need to start the fragment if it is a leaf fragment, or set up a fragment manager if it is non leaf.
+      if(fragment.getLeafFragment()){
+        FragmentContext context = new FragmentContext(bee.getContext(), fragment, null, bee.getContext().getFunctionImplementationRegistry());
+        ControlTunnel tunnel = bee.getContext().getController().getTunnel(fragment.getForeman());
+        NonRootStatusReporter listener = new NonRootStatusReporter(context, tunnel);
+        FragmentRoot rootOperator = bee.getContext().getPlanReader().readFragmentOperator(fragment.getFragmentJson());
+        FragmentExecutor fr = new FragmentExecutor(context, bee, rootOperator, listener);
+        bee.addFragmentRunner(fr);
+      }else{ // isIntermediate, store for incoming data.
+        NonRootFragmentManager manager = new NonRootFragmentManager(fragment, bee);
+        bee.getContext().getWorkBus().setFragmentManager(manager);
+      }
+
     } catch (Exception e) {
-      listener.fail(fragment.getHandle(), "Failure due to uncaught exception", e);
+        throw new UserRpcException(bee.getContext().getEndpoint(), "Failure while trying to start remote fragment", e);
     } catch (OutOfMemoryError t) {
       if (t.getMessage().startsWith("Direct buffer")) {
-        listener.fail(fragment.getHandle(), "Failure due to error", t);
+        throw new UserRpcException(bee.getContext().getEndpoint(), "Out of direct memory while trying to start remote fragment", t);
       } else {
         throw t;
       }
@@ -144,7 +146,7 @@ public class ControlHandlerImpl implements ControlMessageHandler {
    */
   @Override
   public Ack cancelFragment(FragmentHandle handle) {
-    FragmentManager manager = bee.getContext().getWorkBus().getFragmentManager(handle);
+    FragmentManager manager = bee.getContext().getWorkBus().getFragmentManagerIfExists(handle);
     if (manager != null) {
       // try remote fragment cancel.
       manager.cancel();
@@ -160,7 +162,7 @@ public class ControlHandlerImpl implements ControlMessageHandler {
   }
 
   public Ack receivingFragmentFinished(FinishedReceiver finishedReceiver) {
-    FragmentManager manager = bee.getContext().getWorkBus().getFragmentManager(finishedReceiver.getSender());
+    FragmentManager manager = bee.getContext().getWorkBus().getFragmentManagerIfExists(finishedReceiver.getSender());
 
     FragmentExecutor executor;
     if (manager != null) {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlMessageHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlMessageHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlMessageHandler.java
index d00478b..c5d78cc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlMessageHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/batch/ControlMessageHandler.java
@@ -19,13 +19,12 @@ package org.apache.drill.exec.work.batch;
 
 import io.netty.buffer.ByteBuf;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.memory.OutOfMemoryException;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
 import org.apache.drill.exec.rpc.Response;
 import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.UserRpcException;
 import org.apache.drill.exec.rpc.control.ControlConnection;
 
 public interface ControlMessageHandler {
@@ -33,7 +32,7 @@ public interface ControlMessageHandler {
   public abstract Response handle(ControlConnection connection, int rpcType, ByteBuf pBody, ByteBuf dBody)
       throws RpcException;
 
-  public abstract void startNewRemoteFragment(PlanFragment fragment) throws OutOfMemoryException, ExecutionSetupException;
+  public abstract void startNewRemoteFragment(PlanFragment fragment) throws UserRpcException;
 
   public abstract Ack cancelFragment(FragmentHandle handle);
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
index 0a34a22..2e4f43d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
@@ -21,20 +21,12 @@ import io.netty.buffer.ByteBuf;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.logical.LogicalPlan;
 import org.apache.drill.common.logical.PlanProperties.Generator.ResultMode;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.cache.DistributedCache.CacheConfig;
-import org.apache.drill.exec.cache.DistributedCache.SerializationMode;
 import org.apache.drill.exec.coord.DistributedSemaphore;
 import org.apache.drill.exec.coord.DistributedSemaphore.DistributedLease;
 import org.apache.drill.exec.exception.FragmentSetupException;
@@ -53,8 +45,6 @@ import org.apache.drill.exec.planner.fragment.SimpleParallelizer;
 import org.apache.drill.exec.planner.fragment.StatsCollector;
 import org.apache.drill.exec.planner.sql.DirectPlan;
 import org.apache.drill.exec.planner.sql.DrillSqlWorker;
-import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;
@@ -74,20 +64,12 @@ import org.apache.drill.exec.work.ErrorHelper;
 import org.apache.drill.exec.work.QueryWorkUnit;
 import org.apache.drill.exec.work.WorkManager.WorkerBee;
 
-import com.google.common.collect.Lists;
-
 /**
  * Foreman manages all queries where this is the driving/root node.
  */
 public class Foreman implements Runnable, Closeable, Comparable<Object>{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Foreman.class);
 
-
-  public static final CacheConfig<FragmentHandle, PlanFragment> FRAGMENT_CACHE = CacheConfig //
-      .newBuilder(FragmentHandle.class, PlanFragment.class) //
-      .mode(SerializationMode.PROTOBUF) //
-      .build();
-
   private QueryId queryId;
   private RunQuery queryRequest;
   private QueryContext context;
@@ -159,8 +141,7 @@ public class Foreman implements Runnable, Closeable, Comparable<Object>{
       }
     }
 
-    boolean verbose = getContext().getOptions().getOption(ExecConstants.ENABLE_VERBOSE_ERRORS_KEY).bool_val;
-    DrillPBError error = ErrorHelper.logAndConvertError(context.getCurrentEndpoint(), message, t, logger, verbose);
+    DrillPBError error = ErrorHelper.logAndConvertError(context.getCurrentEndpoint(), message, t, logger);
     QueryResult result = QueryResult //
         .newBuilder() //
         .addError(error) //
@@ -376,38 +357,13 @@ public class Foreman implements Runnable, Closeable, Comparable<Object>{
           queryId, context.getActiveEndpoints(), context.getPlanReader(), rootFragment, planningSet, initiatingClient.getSession());
 
       this.context.getWorkBus().setFragmentStatusListener(work.getRootFragment().getHandle().getQueryId(), fragmentManager);
-      List<PlanFragment> leafFragments = Lists.newArrayList();
-      List<PlanFragment> intermediateFragments = Lists.newArrayList();
-
-      // store fragments in distributed grid.
-      logger.debug("Storing fragments");
-      List<Future<PlanFragment>> queue = new LinkedList<>();
-      for (PlanFragment f : work.getFragments()) {
-        // store all fragments in grid since they are part of handshake.
-
-        queue.add(context.getCache().getMap(FRAGMENT_CACHE).put(f.getHandle(), f));
-        if (f.getLeafFragment()) {
-          leafFragments.add(f);
-        } else {
-          intermediateFragments.add(f);
-        }
-      }
-
-      for (Future<PlanFragment> f : queue) {
-        try {
-          f.get(10, TimeUnit.SECONDS);
-        } catch (InterruptedException | ExecutionException | TimeoutException e) {
-          throw new ExecutionSetupException("failure while storing plan fragments", e);
-        }
-      }
 
-      int totalFragments = 1 + intermediateFragments.size() + leafFragments.size();
+      int totalFragments = 1 + work.getFragments().size();;
       fragmentManager.getStatus().setTotalFragments(totalFragments);
       fragmentManager.getStatus().updateCache();
-      logger.debug("Fragments stored.");
 
       logger.debug("Submitting fragments to run.");
-      fragmentManager.runFragments(bee, work.getRootFragment(), work.getRootOperator(), initiatingClient, leafFragments, intermediateFragments);
+      fragmentManager.runFragments(bee, work.getRootFragment(), work.getRootOperator(), initiatingClient, work.getFragments());
 
       logger.debug("Fragments running.");
       state.updateState(QueryState.PENDING, QueryState.RUNNING);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/FragmentData.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/FragmentData.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/FragmentData.java
index b9b3de0..52fd0a9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/FragmentData.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/FragmentData.java
@@ -31,7 +31,11 @@ public class FragmentData {
 
   public FragmentData(FragmentHandle handle, DrillbitEndpoint endpoint, boolean isLocal) {
     super();
-    MinorFragmentProfile f = MinorFragmentProfile.newBuilder().setState(FragmentState.SENDING).setMinorFragmentId(handle.getMinorFragmentId()).build();
+    MinorFragmentProfile f = MinorFragmentProfile.newBuilder() //
+        .setState(FragmentState.SENDING) //
+        .setMinorFragmentId(handle.getMinorFragmentId()) //
+        .setEndpoint(endpoint) //
+        .build();
     this.status = FragmentStatus.newBuilder().setHandle(handle).setProfile(f).build();
     this.endpoint = endpoint;
     this.isLocal = isLocal;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
index a01a5f6..61f3d82 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
@@ -19,6 +19,7 @@ package org.apache.drill.exec.work.foreman;
 
 import io.netty.buffer.ByteBuf;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
@@ -28,6 +29,7 @@ import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.proto.BitControl.FragmentStatus;
+import org.apache.drill.exec.proto.BitControl.InitializeFragments;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
@@ -50,6 +52,9 @@ import org.apache.drill.exec.work.fragment.AbstractStatusReporter;
 import org.apache.drill.exec.work.fragment.FragmentExecutor;
 import org.apache.drill.exec.work.fragment.RootFragmentManager;
 
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Multimap;
+
 /**
  * Each Foreman holds its own fragment manager.  This manages the events associated with execution of a particular query across all fragments.
  */
@@ -87,10 +92,9 @@ public class QueryManager implements FragmentStatusListener{
   }
 
   public void runFragments(WorkerBee bee, PlanFragment rootFragment, FragmentRoot rootOperator,
-                           UserClientConnection rootClient, List<PlanFragment> leafFragments,
-                           List<PlanFragment> intermediateFragments) throws ExecutionSetupException{
+                           UserClientConnection rootClient, List<PlanFragment> nonRootFragments) throws ExecutionSetupException{
     logger.debug("Setting up fragment runs.");
-    remainingFragmentCount.set(intermediateFragments.size() + leafFragments.size() + 1);
+    remainingFragmentCount.set(nonRootFragments.size() + 1);
     assert queryId == rootFragment.getHandle().getQueryId();
     workBus = bee.getContext().getWorkBus();
 
@@ -103,7 +107,7 @@ public class QueryManager implements FragmentStatusListener{
       logger.debug("Setting buffers on root context.");
       rootContext.setBuffers(buffers);
       // add fragment to local node.
-      status.add(new FragmentData(rootFragment.getHandle(), null, true));
+      status.add(new FragmentData(rootFragment.getHandle(), rootFragment.getAssignment(), true));
       logger.debug("Fragment added to local node.");
       rootRunner = new FragmentExecutor(rootContext, bee, rootOperator, new RootStatusHandler(rootContext, rootFragment));
       RootFragmentManager fragmentManager = new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);
@@ -113,19 +117,24 @@ public class QueryManager implements FragmentStatusListener{
         bee.addFragmentRunner(fragmentManager.getRunnable());
       }else{
         // if we do, record the fragment manager in the workBus.
-        workBus.setRootFragmentManager(fragmentManager);
+        workBus.setFragmentManager(fragmentManager);
       }
     }
 
-    // keep track of intermediate fragments (not root or leaf)
-    for (PlanFragment f : intermediateFragments) {
+    Multimap<DrillbitEndpoint, PlanFragment> fragmentMap = ArrayListMultimap.create();
+
+    // record all fragments for status purposes.
+    for (PlanFragment f : nonRootFragments) {
       logger.debug("Tracking intermediate remote node {} with data {}", f.getAssignment(), f.getFragmentJson());
       status.add(new FragmentData(f.getHandle(), f.getAssignment(), false));
+      fragmentMap.put(f.getAssignment(), f);
     }
 
+
+
     // send remote (leaf) fragments.
-    for (PlanFragment f : leafFragments) {
-      sendRemoteFragment(f);
+    for (DrillbitEndpoint ep : fragmentMap.keySet()) {
+      sendRemoteFragments(ep, fragmentMap.get(ep));
     }
 
     bee.getContext().getAllocator().resetFragmentLimits();
@@ -137,11 +146,16 @@ public class QueryManager implements FragmentStatusListener{
     }
   }
 
-  private void sendRemoteFragment(PlanFragment fragment){
-    logger.debug("Sending remote fragment to node {} with data {}", fragment.getAssignment(), fragment.getFragmentJson());
-    status.add(new FragmentData(fragment.getHandle(), fragment.getAssignment(), false));
-    FragmentSubmitListener listener = new FragmentSubmitListener(fragment.getAssignment(), fragment);
-    controller.getTunnel(fragment.getAssignment()).sendFragment(listener, fragment);
+  private void sendRemoteFragments(DrillbitEndpoint assignment, Collection<PlanFragment> fragments){
+    InitializeFragments.Builder fb = InitializeFragments.newBuilder();
+    for(PlanFragment f : fragments){
+      fb.addFragment(f);
+    }
+    InitializeFragments initFrags = fb.build();
+
+    logger.debug("Sending remote fragments to node {} with data {}", assignment, initFrags);
+    FragmentSubmitListener listener = new FragmentSubmitListener(assignment, initFrags);
+    controller.getTunnel(assignment).sendFragments(listener, initFrags);
   }
 
 
@@ -255,16 +269,17 @@ public class QueryManager implements FragmentStatusListener{
 
   }
 
-  public RpcOutcomeListener<Ack> getSubmitListener(DrillbitEndpoint endpoint, PlanFragment value){
+  public RpcOutcomeListener<Ack> getSubmitListener(DrillbitEndpoint endpoint, InitializeFragments value){
     return new FragmentSubmitListener(endpoint, value);
   }
 
-  private class FragmentSubmitListener extends EndpointListener<Ack, PlanFragment>{
+  private class FragmentSubmitListener extends EndpointListener<Ack, InitializeFragments>{
 
-    public FragmentSubmitListener(DrillbitEndpoint endpoint, PlanFragment value) {
+    public FragmentSubmitListener(DrillbitEndpoint endpoint, InitializeFragments value) {
       super(endpoint, value);
     }
 
+
     @Override
     public void failed(RpcException ex) {
       logger.debug("Failure while sending fragment.  Stopping query.", ex);
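
One design note on the sendRemoteFragments() change above: the non-root fragments are grouped by their assigned endpoint with a Guava Multimap, so every Drillbit receives a single batched InitializeFragments request instead of one RPC per fragment. A standalone sketch of that grouping step (Fragment and the endpoint names are stand-ins; only ArrayListMultimap is assumed from Guava):

import java.util.Arrays;
import java.util.List;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class GroupByEndpointExample {
  // Stand-in for a PlanFragment: just an assigned endpoint plus an id.
  static class Fragment {
    final String endpoint;
    final int id;
    Fragment(String endpoint, int id) { this.endpoint = endpoint; this.id = id; }
  }

  public static void main(String[] args) {
    List<Fragment> nonRootFragments = Arrays.asList(
        new Fragment("node1", 1), new Fragment("node2", 2), new Fragment("node1", 3));

    // Group fragments by the node that will run them, as QueryManager now does.
    Multimap<String, Fragment> byEndpoint = ArrayListMultimap.create();
    for (Fragment f : nonRootFragments) {
      byEndpoint.put(f.endpoint, f);
    }

    // One batched request per endpoint.
    for (String endpoint : byEndpoint.keySet()) {
      System.out.println(endpoint + " gets " + byEndpoint.get(endpoint).size() + " fragment(s)");
    }
  }
}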

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/AbstractStatusReporter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/AbstractStatusReporter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/AbstractStatusReporter.java
index 1983ebb..422ebed 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/AbstractStatusReporter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/AbstractStatusReporter.java
@@ -45,8 +45,7 @@ public abstract class AbstractStatusReporter implements StatusReporter{
     context.getStats().addMetricsToStatus(b);
     b.setState(state);
     if(t != null){
-      boolean verbose = context.getOptions().getOption(ExecConstants.ENABLE_VERBOSE_ERRORS_KEY).bool_val;
-      b.setError(ErrorHelper.logAndConvertError(context.getIdentity(), message, t, logger, verbose));
+      b.setError(ErrorHelper.logAndConvertError(context.getIdentity(), message, t, logger));
     }
     status.setHandle(context.getHandle());
     b.setMemoryUsed(context.getAllocator().getAllocatedMemory());

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
index 7fc7d6b..3a195e8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
@@ -28,8 +28,6 @@ import net.hydromatic.optiq.jdbc.SimpleOptiqSchema;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.util.TestTools;
 import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.cache.DistributedCache;
-import org.apache.drill.exec.cache.local.LocalCache;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.memory.TopLevelAllocator;
 import org.apache.drill.exec.ops.QueryContext;
@@ -70,8 +68,6 @@ public class PlanningBase extends ExecTest{
   protected void testSqlPlan(String sqlCommands) throws Exception {
     String[] sqlStrings = sqlCommands.split(";");
 
-    final DistributedCache cache = new LocalCache();
-    cache.run();
 
     final LocalPStoreProvider provider = new LocalPStoreProvider(config);
     provider.start();
@@ -91,8 +87,6 @@ public class PlanningBase extends ExecTest{
         result = config;
         dbContext.getOptionManager();
         result = systemOptions;
-        dbContext.getCache();
-        result = cache;
         dbContext.getPersistentStoreProvider();
         result = provider;
       }
@@ -126,8 +120,6 @@ public class PlanningBase extends ExecTest{
         result = queryOptions;
         context.getConfig();
         result = config;
-        context.getCache();
-        result = cache;
         context.getDrillOperatorTable();
         result = table;
       }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java b/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
index bfa97d1..51f3121 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
@@ -44,6 +44,15 @@ public class TestBugFixes extends BaseTestQuery {
     test(select);
   }
 
+  @Test
+  public void testSysDrillbits() throws Exception {
+    test("select * from sys.drillbits");
+  }
+
+  @Test
+  public void testVersionTable() throws Exception {
+    test("select * from sys.version");
+  }
 
   @Test
   public void DRILL883() throws Exception {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/cache/TestCacheSerialization.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/cache/TestCacheSerialization.java b/exec/java-exec/src/test/java/org/apache/drill/exec/cache/TestCacheSerialization.java
deleted file mode 100644
index d507913..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/cache/TestCacheSerialization.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.cache;
-
-import java.util.List;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.cache.DistributedCache.CacheConfig;
-import org.apache.drill.exec.cache.infinispan.ICache;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.physical.impl.orderedpartitioner.OrderedPartitionRecordBatch;
-import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
-import org.apache.drill.exec.proto.UserBitShared.QueryId;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.record.VectorAccessible;
-import org.apache.drill.exec.record.VectorContainer;
-import org.apache.drill.exec.record.VectorWrapper;
-import org.apache.drill.exec.record.WritableBatch;
-import org.apache.drill.exec.server.options.OptionValue;
-import org.apache.drill.exec.server.options.OptionValue.OptionType;
-import org.apache.drill.exec.vector.AllocationHelper;
-import org.apache.drill.exec.vector.IntVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.VarBinaryVector;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-
-public class TestCacheSerialization extends ExecTest {
-
-  private static DistributedCache ICACHE;
-  private static BufferAllocator ALLOCATOR;
-  private static final DrillConfig CONFIG = DrillConfig.create();
-
-  @Test
-  public void protobufSerialization() throws Exception {
-    DistributedMap<String, FragmentHandle> map = ICACHE.getMap(CacheConfig.newBuilder(FragmentHandle.class).proto().build());
-    FragmentHandle s = FragmentHandle.newBuilder().setMajorFragmentId(1).setMinorFragmentId(1).setQueryId(QueryId.newBuilder().setPart1(74).setPart2(66).build()).build();
-    map.put("1", s).get();
-    for(int i =0; i < 2; i++){
-      FragmentHandle s2 = map.get("1");
-      Assert.assertEquals(s, s2);
-    }
-  }
-
-  @Test
-  public void jacksonSerialization() throws Exception {
-    OptionValue v = OptionValue.createBoolean(OptionType.SESSION, "my test option", true);
-    DistributedMap<String, OptionValue> map = ICACHE.getMap(CacheConfig.newBuilder(OptionValue.class).jackson().build());
-    map.put("1", v).get();
-    for(int i = 0; i < 5; i++){
-      OptionValue v2 = map.get("1");
-      Assert.assertEquals(v, v2);
-    }
-  }
-
-  @Test
-  public void multimapWithDrillSerializable() throws Exception {
-    List<ValueVector> vectorList = Lists.newArrayList();
-
-    MaterializedField intField = MaterializedField.create(SchemaPath.getSimplePath("int"),
-        Types.required(TypeProtos.MinorType.INT));
-    IntVector intVector = (IntVector) TypeHelper.getNewVector(intField, ALLOCATOR);
-    MaterializedField binField = MaterializedField.create(SchemaPath.getSimplePath("binary"),
-        Types.required(TypeProtos.MinorType.VARBINARY));
-    VarBinaryVector binVector = (VarBinaryVector) TypeHelper.getNewVector(binField, ALLOCATOR);
-    AllocationHelper.allocate(intVector, 4, 4);
-    AllocationHelper.allocate(binVector, 4, 5);
-    vectorList.add(intVector);
-    vectorList.add(binVector);
-
-    intVector.getMutator().setSafe(0, 0);
-    binVector.getMutator().setSafe(0, "ZERO".getBytes());
-    intVector.getMutator().setSafe(1, 1);
-    binVector.getMutator().setSafe(1, "ONE".getBytes());
-    intVector.getMutator().setSafe(2, 2);
-    binVector.getMutator().setSafe(2, "TWO".getBytes());
-    intVector.getMutator().setSafe(3, 3);
-    binVector.getMutator().setSafe(3, "THREE".getBytes());
-    intVector.getMutator().setValueCount(4);
-    binVector.getMutator().setValueCount(4);
-
-    VectorContainer container = new VectorContainer();
-    container.addCollection(vectorList);
-    container.setRecordCount(4);
-    WritableBatch batch = WritableBatch.getBatchNoHVWrap(container.getRecordCount(), container, false);
-    CachedVectorContainer wrap = new CachedVectorContainer(batch, ALLOCATOR);
-
-    DistributedMultiMap<String, CachedVectorContainer> mmap = ICACHE.getMultiMap(OrderedPartitionRecordBatch.MULTI_CACHE_CONFIG);
-    mmap.put("vectors", wrap).get();
-
-    for(int x =0; x < 2; x++){
-      CachedVectorContainer newWrap = (CachedVectorContainer) mmap.get("vectors").iterator().next();
-
-      VectorAccessible newContainer = newWrap.get();
-      for (VectorWrapper<?> w : newContainer) {
-        ValueVector vv = w.getValueVector();
-        int values = vv.getAccessor().getValueCount();
-        for (int i = 0; i < values; i++) {
-          Object o = vv.getAccessor().getObject(i);
-          if (o instanceof byte[]) {
-            System.out.println(new String((byte[]) o));
-          } else {
-            System.out.println(o);
-          }
-        }
-      }
-
-      newWrap.clear();
-    }
-  }
-
-  @BeforeClass
-  public static void setupCache() throws Exception {
-    ALLOCATOR = new TopLevelAllocator();
-    ICACHE = new ICache(CONFIG, ALLOCATOR, true);
-    ICACHE.run();
-  }
-
-  @AfterClass
-  public static void destroyCache() throws Exception {
-    ICACHE.close();
-    ALLOCATOR.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestClassTransformation.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestClassTransformation.java b/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestClassTransformation.java
index 896cf4d..bb855c9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestClassTransformation.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestClassTransformation.java
@@ -20,7 +20,6 @@ package org.apache.drill.exec.compile;
 import java.io.IOException;
 
 import org.apache.drill.BaseTestQuery;
-import org.apache.drill.exec.cache.local.LocalCache;
 import org.apache.drill.exec.compile.ClassTransformer.ClassSet;
 import org.apache.drill.exec.compile.sig.GeneratorMapping;
 import org.apache.drill.exec.compile.sig.MappingSet;
@@ -105,7 +104,7 @@ public class TestClassTransformation extends BaseTestQuery {
   private void compilationInnerClass(QueryClassLoader loader) throws Exception{
     CodeGenerator<ExampleInner> cg = newCodeGenerator(ExampleInner.class, ExampleTemplateWithInner.class);
 
-    ClassTransformer ct = new ClassTransformer(new LocalCache());
+    ClassTransformer ct = new ClassTransformer();
     Class<? extends ExampleInner> c = (Class<? extends ExampleInner>) ct.getImplementationClass(loader, cg.getDefinition(), cg.generateAndGet(), cg.getMaterializedClassName());
     ExampleInner t = (ExampleInner) c.newInstance();
     t.doOutside();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
index a62409b..4aaaa78 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
@@ -26,7 +26,6 @@ import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.logical.LogicalPlan;
 import org.apache.drill.common.util.FileUtils;
 import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.cache.DistributedCache;
 import org.apache.drill.exec.client.DrillClient;
 import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
@@ -73,27 +72,27 @@ public class TestOptiqPlans extends ExecTest {
 
   @Test
   public void orderBy(@Injectable final BootStrapContext ctxt, @Injectable UserClientConnection connection,
-      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com, @Injectable DistributedCache cache,
+      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com,
       @Injectable Controller controller, @Injectable WorkEventBus workBus) throws Throwable {
-    SimpleRootExec exec = doLogicalTest(ctxt, connection, "/logical_order.json", coord, com, cache, controller, workBus);
+    SimpleRootExec exec = doLogicalTest(ctxt, connection, "/logical_order.json", coord, com, controller, workBus);
   }
 
   @Test
   public void stringFilter(@Injectable final BootStrapContext ctxt, @Injectable UserClientConnection connection,
-      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com, @Injectable DistributedCache cache,
+      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com,
       @Injectable Controller controller, @Injectable WorkEventBus workBus) throws Throwable {
-    SimpleRootExec exec = doLogicalTest(ctxt, connection, "/logical_string_filter.json", coord, com, cache, controller, workBus);
+    SimpleRootExec exec = doLogicalTest(ctxt, connection, "/logical_string_filter.json", coord, com, controller, workBus);
   }
 
   @Test
   public void groupBy(@Injectable final BootStrapContext bitContext, @Injectable UserClientConnection connection,
-      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com, @Injectable DistributedCache cache,
+      @Injectable ClusterCoordinator coord, @Injectable DataConnectionCreator com,
       @Injectable Controller controller, @Injectable WorkEventBus workBus) throws Throwable {
-    SimpleRootExec exec = doLogicalTest(bitContext, connection, "/logical_group.json", coord, com, cache, controller, workBus);
+    SimpleRootExec exec = doLogicalTest(bitContext, connection, "/logical_group.json", coord, com, controller, workBus);
   }
 
   private SimpleRootExec doLogicalTest(final BootStrapContext context, UserClientConnection connection, String file,
-      ClusterCoordinator coord, DataConnectionCreator com, DistributedCache cache, Controller controller, WorkEventBus workBus) throws Exception {
+      ClusterCoordinator coord, DataConnectionCreator com, Controller controller, WorkEventBus workBus) throws Exception {
     new NonStrictExpectations() {
       {
         context.getMetrics();
@@ -105,7 +104,7 @@ public class TestOptiqPlans extends ExecTest {
       }
     };
     RemoteServiceSet lss = RemoteServiceSet.getLocalServiceSet();
-    DrillbitContext bitContext = new DrillbitContext(DrillbitEndpoint.getDefaultInstance(), context, coord, controller, com, cache, workBus, new LocalPStoreProvider(DrillConfig.create()));
+    DrillbitContext bitContext = new DrillbitContext(DrillbitEndpoint.getDefaultInstance(), context, coord, controller, com, workBus, new LocalPStoreProvider(DrillConfig.create()));
     QueryContext qc = new QueryContext(UserSession.Builder.newBuilder().setSupportComplexTypes(true).build(), QueryId.getDefaultInstance(), bitContext);
     PhysicalPlanReader reader = bitContext.getPlanReader();
     LogicalPlan plan = reader.readLogicalPlan(Files.toString(FileUtils.getResourceAsFile(file), Charsets.UTF_8));

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/orderedpartitioner/TestOrderedPartitionExchange.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/orderedpartitioner/TestOrderedPartitionExchange.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/orderedpartitioner/TestOrderedPartitionExchange.java
index 8419860..27d38e6 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/orderedpartitioner/TestOrderedPartitionExchange.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/orderedpartitioner/TestOrderedPartitionExchange.java
@@ -38,6 +38,7 @@ import org.apache.drill.exec.vector.BigIntVector;
 import org.apache.drill.exec.vector.Float8Vector;
 import org.apache.drill.exec.vector.IntVector;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
@@ -47,6 +48,7 @@ import com.google.common.io.Files;
 /**
  * Tests the OrderedPartitionExchange Operator
  */
+@Ignore("Disabled until alternative to distributed cache provided.")
 public class TestOrderedPartitionExchange extends PopUnitTestBase {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestOrderedPartitionExchange.class);
 

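For readers unfamiliar with the JUnit 4 mechanism used in the hunk above, a class-level @Ignore (with an optional reason string) causes the runner to skip every test in the class rather than executing it. Below is a minimal, self-contained sketch of that behavior; the class and test names are illustrative only and are not part of the Drill code base.

import org.junit.Ignore;
import org.junit.Test;

// Minimal JUnit 4 sketch: a class-level @Ignore skips all contained tests,
// and the reason string is surfaced in the runner's report.
@Ignore("Disabled until alternative to distributed cache provided.")
public class ExampleIgnoredTest {

  @Test
  public void neverRuns() {
    // Reported as skipped; this assertion is never reached while the class is ignored.
    throw new AssertionError("should not execute while the class is ignored");
  }
}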
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestBitRpc.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestBitRpc.java b/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestBitRpc.java
index d408773..7b771f5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestBitRpc.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestBitRpc.java
@@ -74,7 +74,7 @@ public class TestBitRpc extends ExecTest {
     BootStrapContext c2 = new BootStrapContext(DrillConfig.create());
 
     new NonStrictExpectations() {{
-      workBus.getOrCreateFragmentManager((FragmentHandle) any); result = fman;
+      workBus.getFragmentManagerIfExists((FragmentHandle) any); result = fman;
       workBus.getFragmentManager( (FragmentHandle) any); result = fman;
       fman.getFragmentContext(); result = fcon;
       fcon.getAllocator(); result = c.getAllocator();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestJsonReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestJsonReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestJsonReader.java
index f450e5d..67c6dc8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestJsonReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestJsonReader.java
@@ -17,9 +17,7 @@
  */
 package org.apache.drill.exec.vector.complex.writer;
 
-import static org.jgroups.util.Util.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.*;
 import io.netty.buffer.DrillBuf;
 
 import java.io.ByteArrayOutputStream;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/451dd608/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
index bfc1b8a..fbe611f 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
@@ -110,7 +110,7 @@ public class DrillCursor implements Cursor{
           return true;
         }
       } catch (RpcException | InterruptedException | SchemaChangeException e) {
-        throw new SQLException("Failure while trying to get next result batch.", e);
+        throw new SQLException("Failure while executing query.", e);
       }
 
     }
