Repository: drill
Updated Branches:
  refs/heads/master 58c3c4c69 -> c1b847acd


http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/contrib/storage-hive/core/src/test/resources/student.txt
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/resources/student.txt b/contrib/storage-hive/core/src/test/resources/student.txt
new file mode 100644
index 0000000..83294bf
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/resources/student.txt
@@ -0,0 +1,10 @@
+1,oscar xylophone,30,3.30,143598241627
+2,holly thompson,43,2.36,707874277199
+3,xavier polk,26,3.62,45645272898
+4,katie falkner,52,2.36,921562871038
+5,sarah xylophone,52,1.03,568813706675
+6,mike robinson,71,2.37,886568185320
+7,sarah steinbeck,43,1.96,5027961705
+8,holly van buren,22,3.46,497470771558
+9,jessica laertes,49,3.63,680463521627
+10,wendy polk,46,1.95,538178907185

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/contrib/storage-hive/core/src/test/resources/voter.txt
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/resources/voter.txt b/contrib/storage-hive/core/src/test/resources/voter.txt
new file mode 100644
index 0000000..f018673
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/resources/voter.txt
@@ -0,0 +1,10 @@
+1,david,davidson,49,socialist,369.78,5108
+2,priscilla,steinbeck,61,democrat,,111.76,2987
+3,ethan,allen,40,democrat,,961.51,13817
+4,zach,van,buren,71,libertarian,421.63,8822
+5,gabriella,young,74,independent,409.68,11798
+6,zach,ichabod,52,libertarian,480.76,26113
+7,tom,thompson,36,republican,830.34,28480
+8,mike,robinson,38,independent,178.64,29148
+9,luke,johnson,56,socialist,200.81,4255
+10,oscar,xylophone,74,green,265.97,24970

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index 06f8088..60fba0f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -19,11 +19,14 @@ package org.apache.drill.exec.ops;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.List;
 
+import com.google.common.collect.Lists;
 import io.netty.buffer.DrillBuf;
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.jdbc.SimpleCalciteSchema;
 
+import org.apache.drill.common.AutoCloseables;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
@@ -38,6 +41,7 @@ import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.server.options.QueryOptionManager;
+import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.PartitionExplorer;
 import org.apache.drill.exec.store.PartitionExplorerImpl;
 import org.apache.drill.exec.store.SchemaConfig;
@@ -69,6 +73,8 @@ public class QueryContext implements AutoCloseable, UdfUtilities {
   private final QueryContextInformation queryContextInfo;
   private final ViewExpansionContext viewExpansionContext;
 
+  private final List<SchemaPlus> schemaTreesToClose;
+
   /*
    * Flag to indicate if close has been called, after calling close the first
    * time this is set to true and the close method becomes a no-op.
@@ -96,6 +102,7 @@ public class QueryContext implements AutoCloseable, UdfUtilities {
     // TODO(DRILL-1942) the new allocator has this capability built-in, so this can be removed once that is available
     bufferManager = new BufferManager(this.allocator, null);
     viewExpansionContext = new ViewExpansionContext(this);
+    schemaTreesToClose = Lists.newArrayList();
   }
 
   public PlannerSettings getPlannerSettings() {
@@ -141,7 +148,7 @@ public class QueryContext implements AutoCloseable, UdfUtilities {
    * @param userName User who owns the schema tree.
    * @return Root of the schema tree.
    */
-  public SchemaPlus getRootSchema(String userName) {
+  public SchemaPlus getRootSchema(final String userName) {
     final String schemaUser = isImpersonationEnabled() ? userName : ImpersonationUtil.getProcessUserName();
     final SchemaConfig schemaConfig = SchemaConfig.newBuilder(schemaUser, this).build();
     return getRootSchema(schemaConfig);
@@ -156,6 +163,7 @@ public class QueryContext implements AutoCloseable, UdfUtilities {
     try {
       final SchemaPlus rootSchema = SimpleCalciteSchema.createRootSchema(false);
       drillbitContext.getSchemaFactory().registerSchemas(schemaConfig, rootSchema);
+      schemaTreesToClose.add(rootSchema);
       return rootSchema;
     } catch(IOException e) {
       // We can't proceed further without a schema, throw a runtime exception.
@@ -236,12 +244,34 @@ public class QueryContext implements AutoCloseable, UdfUtilities {
   public void close() throws Exception {
     try {
       if (!closed) {
-        // TODO(DRILL-1942) the new allocator has this capability built-in, so this can be removed once that is available
-        bufferManager.close();
-        allocator.close();
+        List<AutoCloseable> toClose = Lists.newArrayList();
+
+        // TODO(DRILL-1942) the new allocator has this capability built-in, so we can remove bufferManager and
+        // allocator from the toClose list.
+        toClose.add(bufferManager);
+        toClose.add(allocator);
+
+        for(SchemaPlus tree : schemaTreesToClose) {
+          addSchemasToCloseList(tree, toClose);
+        }
+
+        AutoCloseables.close(toClose.toArray(new AutoCloseable[0]));
       }
     } finally {
       closed = true;
     }
   }
+
+  private void addSchemasToCloseList(final SchemaPlus tree, final List<AutoCloseable> toClose) {
+    for(String subSchemaName : tree.getSubSchemaNames()) {
+      addSchemasToCloseList(tree.getSubSchema(subSchemaName), toClose);
+    }
+
+    try {
+      AbstractSchema drillSchemaImpl =  tree.unwrap(AbstractSchema.class);
+      toClose.add(drillSchemaImpl);
+    } catch (ClassCastException e) {
+      // Ignore as the SchemaPlus is not an implementation of Drill schema.
+    }
+  }
 }
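
For reference, the close() rewrite above relies on the collect-then-close-everything pattern: AutoCloseables.close(...) keeps going when one resource fails so the remaining resources still get released. A minimal, self-contained sketch of that behavior (the closeAll helper below is hypothetical, not Drill's AutoCloseables class):

import java.util.ArrayList;
import java.util.List;

public class CloseAllSketch {
  // Close every resource; remember the first failure and suppress the rest.
  static void closeAll(List<AutoCloseable> toClose) throws Exception {
    Exception first = null;
    for (AutoCloseable c : toClose) {
      try {
        c.close();
      } catch (Exception e) {
        if (first == null) {
          first = e;
        } else {
          first.addSuppressed(e);
        }
      }
    }
    if (first != null) {
      throw first;
    }
  }

  public static void main(String[] args) throws Exception {
    List<AutoCloseable> toClose = new ArrayList<>();
    toClose.add(() -> System.out.println("closing buffer manager"));
    toClose.add(() -> System.out.println("closing allocator"));
    closeAll(toClose); // both print even if the first close were to throw
  }
}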

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
index 0ba2426..a952cc2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
@@ -38,7 +38,7 @@ import org.apache.drill.exec.planner.logical.CreateTableEntry;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
 
-public abstract class AbstractSchema implements Schema, SchemaPartitionExplorer {
+public abstract class AbstractSchema implements Schema, SchemaPartitionExplorer, AutoCloseable {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractSchema.class);
 
   protected final List<String> schemaPath;
@@ -186,5 +186,8 @@ public abstract class AbstractSchema implements Schema, SchemaPartitionExplorer
     return true;
   }
 
-
+  @Override
+  public void close() throws Exception {
+    // no-op: default implementation for most implementations.
+  }
 }
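
With AbstractSchema now implementing AutoCloseable (no-op by default), a schema implementation that holds a per-query resource can simply override close() and be picked up by the QueryContext traversal above. An illustrative subclass shape (the class and field names are made up, not part of this commit):

public class ExampleResourceSchema /* would extend AbstractSchema */ implements AutoCloseable {
  // e.g. a per-query metastore or remote-catalog connection
  private final AutoCloseable connection;

  public ExampleResourceSchema(AutoCloseable connection) {
    this.connection = connection;
  }

  @Override
  public void close() throws Exception {
    // Invoked when QueryContext.close() walks the registered schema trees.
    connection.close();
  }
}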

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
index 499b740..aa766be 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
@@ -130,19 +130,7 @@ public class ImpersonationUtil {
    * @return
    */
   public static DrillFileSystem createFileSystem(String proxyUserName, Configuration fsConf) {
-    return createFileSystem(proxyUserName, fsConf, null);
-  }
-
-  /**
-   * Create DrillFileSystem for given <i>proxyUserName</i>, configuration and stats.
-   *
-   * @param proxyUserName Name of the user whom to impersonate while accessing the FileSystem contents.
-   * @param fsConf FileSystem configuration.
-   * @param stats OperatorStats for DrillFileSystem (optional)
-   * @return
-   */
-  public static DrillFileSystem createFileSystem(String proxyUserName, Configuration fsConf, OperatorStats stats) {
-    return createFileSystem(createProxyUgi(proxyUserName), fsConf, stats);
+    return createFileSystem(createProxyUgi(proxyUserName), fsConf, null);
   }
 
   /** Helper method to create DrillFileSystem */
@@ -152,7 +140,7 @@ public class ImpersonationUtil {
     try {
       fs = proxyUserUgi.doAs(new PrivilegedExceptionAction<DrillFileSystem>() {
         public DrillFileSystem run() throws Exception {
-          logger.debug("Creating DrillFileSystem for proxy user: " + 
UserGroupInformation.getCurrentUser());
+          logger.trace("Creating DrillFileSystem for proxy user: " + 
UserGroupInformation.getCurrentUser());
           return new DrillFileSystem(fsConf, stats);
         }
       });
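
For context, createFileSystem() above uses Hadoop's proxy-user mechanism: code inside doAs() runs with the impersonated user's identity, so the DrillFileSystem it creates carries that identity. A standalone sketch of the pattern (the user name "alice" is illustrative; assumes hadoop-common on the classpath and proxy-user privileges configured):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
  public static void main(String[] args) throws Exception {
    // Wrap the real (login) user with a proxy identity.
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("alice", UserGroupInformation.getLoginUser());

    // Inside doAs, getCurrentUser() reports the proxy user.
    String current = proxyUgi.doAs(
        (PrivilegedExceptionAction<String>) () ->
            UserGroupInformation.getCurrentUser().getUserName());
    System.out.println(current); // prints "alice"
  }
}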

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java b/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
index a07f621..3d09d6a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
@@ -17,7 +17,9 @@
  */
 package org.apache.drill;
 
+import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.List;
@@ -25,6 +27,7 @@ import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.io.Files;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ExecConstants;
@@ -362,6 +365,34 @@ public class BaseTestQuery extends ExecTest {
     return Resources.toString(url, Charsets.UTF_8);
   }
 
+  /**
+   * Copy the resource (ex. file on classpath) to a physical file on FileSystem.
+   * @param resource
+   * @return
+   * @throws IOException
+   */
+  public static String getPhysicalFileFromResource(final String resource) throws IOException {
+    final File file = File.createTempFile("tempfile", ".txt");
+    file.deleteOnExit();
+    PrintWriter printWriter = new PrintWriter(file);
+    printWriter.write(BaseTestQuery.getFile(resource));
+    printWriter.close();
+
+    return file.getPath();
+  }
+
+  /**
+   * Create a temp directory to store the given <i>dirName</i>
+   * @param dirName
+   * @return Full path including temp parent directory and given directory name.
+   */
+  public static String getTempDir(final String dirName) {
+    File dir = Files.createTempDir();
+    dir.deleteOnExit();
+
+    return dir.getAbsolutePath() + File.separator + dirName;
+  }
+
   private static class SilentListener implements UserResultsListener {
     private volatile UserException exception;
     private AtomicInteger count = new AtomicInteger();
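
Illustrative use of the two helpers added above (the resource and directory names are made up):

import org.apache.drill.BaseTestQuery;

public class HelperUsageSketch {
  public static void main(String[] args) throws Exception {
    // Materialize a classpath resource as a real file on disk.
    String dataFile = BaseTestQuery.getPhysicalFileFromResource("example/data.txt");

    // Reserve a unique, not-yet-created path under a temp parent,
    // e.g. as the target directory of a CTAS statement.
    String tableDir = BaseTestQuery.getTempDir("ctasTarget");

    System.out.println(dataFile + " -> " + tableDir);
  }
}

Both the temp file and the temp parent directory are registered with deleteOnExit(), so callers need no explicit cleanup.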

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
index 56b7bde..e6d93e9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
@@ -19,28 +19,54 @@ package org.apache.drill.exec.impersonation;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.drill.PlanTestBase;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import java.io.File;
 import java.util.Map;
 import java.util.Properties;
 
 public class BaseTestImpersonation extends PlanTestBase {
+  protected static final String MINIDFS_STORAGE_PLUGIN_NAME = "miniDfsPlugin";
   protected static final String processUser = System.getProperty("user.name");
 
   protected static MiniDFSCluster dfsCluster;
-  protected static Configuration conf;
+  protected static Configuration dfsConf;
+  protected static FileSystem fs;
   protected static String miniDfsStoragePath;
 
+  // Test users and groups
+  protected static final String[] org1Users = { "user0_1", "user1_1", "user2_1", "user3_1", "user4_1", "user5_1" };
+  protected static final String[] org1Groups = { "group0_1", "group1_1", "group2_1", "group3_1", "group4_1", "group5_1" };
+  protected static final String[] org2Users = { "user0_2", "user1_2", "user2_2", "user3_2", "user4_2", "user5_2" };
+  protected static final String[] org2Groups = { "group0_2", "group1_2", "group2_2", "group3_2", "group4_2", "group5_2" };
+
+  static {
+    // "user0_1" belongs to "groups0_1". From "user1_1" onwards each user 
belongs to corresponding group and the group
+    // before it, i.e "user1_1" belongs to "group1_1" and "group0_1" and so on.
+    UserGroupInformation.createUserForTesting(org1Users[0], new 
String[]{org1Groups[0]});
+    for(int i=1; i<org1Users.length; i++) {
+      UserGroupInformation.createUserForTesting(org1Users[i], new String[] { 
org1Groups[i], org1Groups[i-1] });
+    }
+
+    UserGroupInformation.createUserForTesting(org2Users[0], new String[] { 
org2Groups[0] });
+    for(int i=1; i<org2Users.length; i++) {
+      UserGroupInformation.createUserForTesting(org2Users[i], new String[] { 
org2Groups[i], org2Groups[i-1] });
+    }
+  }
+
   /**
   * Start a MiniDFS cluster backed Drillbit cluster with impersonation enabled.
    * @param testClass
@@ -59,34 +85,56 @@ public class BaseTestImpersonation extends PlanTestBase {
   protected static void startMiniDfsCluster(
       final String testClass, final boolean isImpersonationEnabled) throws Exception {
     Preconditions.checkArgument(!Strings.isNullOrEmpty(testClass), "Expected a non-null and non-empty test class name");
-    conf = new Configuration();
+    dfsConf = new Configuration();
 
     // Set the MiniDfs base dir to be the temp directory of the test, so that all files created within the MiniDfs
     // are properly cleanup when test exits.
     miniDfsStoragePath = System.getProperty("java.io.tmpdir") + Path.SEPARATOR + testClass;
-    conf.set("hdfs.minidfs.basedir", miniDfsStoragePath);
+    dfsConf.set("hdfs.minidfs.basedir", miniDfsStoragePath);
 
     if (isImpersonationEnabled) {
       // Set the proxyuser settings so that the user who is running the Drillbits/MiniDfs can impersonate other users.
-      conf.set(String.format("hadoop.proxyuser.%s.hosts", processUser), "*");
-      conf.set(String.format("hadoop.proxyuser.%s.groups", processUser), "*");
+      dfsConf.set(String.format("hadoop.proxyuser.%s.hosts", processUser), "*");
+      dfsConf.set(String.format("hadoop.proxyuser.%s.groups", processUser), "*");
     }
 
     // Start the MiniDfs cluster
-    dfsCluster = new MiniDFSCluster.Builder(conf)
+    dfsCluster = new MiniDFSCluster.Builder(dfsConf)
         .numDataNodes(3)
         .format(true)
         .build();
 
+    fs = dfsCluster.getFileSystem();
+  }
+
+  protected static void startDrillCluster(final boolean isImpersonationEnabled) throws Exception {
     final Properties props = cloneDefaultTestConfigProperties();
     props.setProperty(ExecConstants.IMPERSONATION_ENABLED, Boolean.toString(isImpersonationEnabled));
 
     updateTestCluster(1, DrillConfig.create(props));
   }
 
-  protected static void createAndAddWorkspace(FileSystem fs, String name, String path, short permissions, String owner,
-      String group,
-      Map<String, WorkspaceConfig> workspaces) throws Exception {
+  protected static void addMiniDfsBasedStorage(final Map<String, WorkspaceConfig> workspaces)
+      throws Exception {
+    // Create a HDFS based storage plugin based on local storage plugin and add it to plugin registry (connection string
+    // for mini dfs is varies for each run).
+    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
+    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs_test").getConfig();
+
+    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
+    miniDfsPluginConfig.connection = dfsConf.get(FileSystem.FS_DEFAULT_NAME_KEY);
+
+    createAndAddWorkspace("tmp", "/tmp", (short)0777, processUser, processUser, workspaces);
+
+    miniDfsPluginConfig.workspaces = workspaces;
+    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
+    miniDfsPluginConfig.setEnabled(true);
+
+    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
+  }
+
+  protected static void createAndAddWorkspace(String name, String path, short permissions, String owner,
+      String group, final Map<String, WorkspaceConfig> workspaces) throws Exception {
     final Path dirPath = new Path(path);
     FileSystem.mkdirs(fs, dirPath, new FsPermission(permissions));
     fs.setOwner(dirPath, owner, group);
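
The static block above relies on Hadoop's in-memory test user registry, so the test users and groups need no OS accounts. A standalone sketch of that call (names illustrative):

import java.util.Arrays;

import org.apache.hadoop.security.UserGroupInformation;

public class TestUserSketch {
  public static void main(String[] args) throws Exception {
    // Register "user1_1" with two groups in the in-memory test mapping.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "user1_1", new String[] { "group1_1", "group0_1" });

    // Group lookups for this user now resolve against the test mapping.
    System.out.println(ugi.getUserName() + " -> " + Arrays.toString(ugi.getGroupNames()));
  }
}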

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationDisabledWithMiniDFS.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationDisabledWithMiniDFS.java b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationDisabledWithMiniDFS.java
index 6c3b96f..e01e45d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationDisabledWithMiniDFS.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationDisabledWithMiniDFS.java
@@ -17,51 +17,32 @@
  */
 package org.apache.drill.exec.impersonation;
 
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
-import org.apache.hadoop.fs.FileSystem;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.Map;
-
 public class TestImpersonationDisabledWithMiniDFS extends BaseTestImpersonation {
-  private static final String MINIDFS_STORAGE_PLUGIN_NAME =
-      "minidfs" + TestImpersonationDisabledWithMiniDFS.class.getSimpleName();
 
   @BeforeClass
-  public static void addMiniDfsBasedStorage() throws Exception {
+  public static void setup() throws Exception {
     startMiniDfsCluster(TestImpersonationDisabledWithMiniDFS.class.getSimpleName(), false);
+    startDrillCluster(false);
+    addMiniDfsBasedStorage(Maps.<String, WorkspaceConfig>newHashMap());
+    createTestData();
+  }
 
-    // Create a HDFS based storage plugin based on local storage plugin and add it to plugin registry (connection string
-    // for mini dfs is varies for each run).
-    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
-    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs_test").getConfig();
-
-    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
-    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
-
-    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
-    createAndAddWorkspace(dfsCluster.getFileSystem(), "dfstemp", "/tmp", (short)0777, processUser, processUser, workspaces);
-
-    miniDfsPluginConfig.workspaces = workspaces;
-    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
-    miniDfsPluginConfig.setEnabled(true);
-
-    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
-
+  private static void createTestData() throws Exception {
     // Create test table in minidfs.tmp schema for use in test queries
-    test(String.format("CREATE TABLE %s.dfstemp.dfsRegion AS SELECT * FROM 
cp.`region.json`", MINIDFS_STORAGE_PLUGIN_NAME));
+    test(String.format("CREATE TABLE %s.tmp.dfsRegion AS SELECT * FROM 
cp.`region.json`",
+        MINIDFS_STORAGE_PLUGIN_NAME));
   }
 
   @Test // DRILL-3037
   public void testSimpleQuery() throws Exception {
     final String query =
-        String.format("SELECT sales_city, sales_country FROM dfstemp.dfsRegion 
ORDER BY region_id DESC LIMIT 2");
+        String.format("SELECT sales_city, sales_country FROM tmp.dfsRegion 
ORDER BY region_id DESC LIMIT 2");
 
     testBuilder()
         .optionSettingQueriesForTestQuery(String.format("USE %s", MINIDFS_STORAGE_PLUGIN_NAME))

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
index 27e737d..ef3765e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
@@ -18,13 +18,8 @@
 package org.apache.drill.exec.impersonation;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
-import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.exceptions.UserRemoteException;
-import org.apache.drill.exec.rpc.RpcException;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -44,8 +39,6 @@ import static org.junit.Assert.assertThat;
  * Tests impersonation on metadata related queries as SHOW FILES, SHOW TABLES, CREATE VIEW and CREATE TABLE
  */
 public class TestImpersonationMetadata extends BaseTestImpersonation {
-  private static final String MINIDFS_STORAGE_PLUGIN_NAME = "minidfs" + TestImpersonationMetadata.class.getSimpleName();
-
   private static final String user1 = "drillTestUser1";
   private static final String user2 = "drillTestUser2";
 
@@ -58,50 +51,39 @@ public class TestImpersonationMetadata extends BaseTestImpersonation {
   }
 
   @BeforeClass
-  public static void addMiniDfsBasedStorage() throws Exception {
+  public static void setup() throws Exception {
     startMiniDfsCluster(TestImpersonationMetadata.class.getSimpleName());
-
-    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
-    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs").getConfig();
-
-    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
-    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
-
-    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
-
-    createTestWorkspaces(workspaces);
-
-    miniDfsPluginConfig.workspaces = workspaces;
-    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
-    miniDfsPluginConfig.setEnabled(true);
-
-    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
+    startDrillCluster(true);
+    addMiniDfsBasedStorage(createTestWorkspaces());
   }
 
-  private static void createTestWorkspaces(Map<String, WorkspaceConfig> workspaces) throws Exception {
+  private static Map<String , WorkspaceConfig> createTestWorkspaces() throws Exception {
     // Create "/tmp" folder and set permissions to "777"
-    final FileSystem fs = dfsCluster.getFileSystem();
     final Path tmpPath = new Path("/tmp");
     fs.delete(tmpPath, true);
     FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));
 
+    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap();
+
     // Create /drillTestGrp0_700 directory with permissions 700 (owned by user running the tests)
-    createAndAddWorkspace(fs, "drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);
+    createAndAddWorkspace("drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);
 
     // Create /drillTestGrp0_750 directory with permissions 750 (owned by user running the tests)
-    createAndAddWorkspace(fs, "drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);
+    createAndAddWorkspace("drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);
 
     // Create /drillTestGrp0_755 directory with permissions 755 (owned by user running the tests)
-    createAndAddWorkspace(fs, "drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);
+    createAndAddWorkspace("drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);
 
     // Create /drillTestGrp0_770 directory with permissions 770 (owned by user running the tests)
-    createAndAddWorkspace(fs, "drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);
+    createAndAddWorkspace("drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);
 
     // Create /drillTestGrp0_777 directory with permissions 777 (owned by user running the tests)
-    createAndAddWorkspace(fs, "drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);
+    createAndAddWorkspace("drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);
 
     // Create /drillTestGrp1_700 directory with permissions 700 (owned by user1)
-    createAndAddWorkspace(fs, "drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);
+    createAndAddWorkspace("drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);
+
+    return workspaces;
   }
 
   @Test // DRILL-3037
@@ -278,7 +260,6 @@ public class TestImpersonationMetadata extends BaseTestImpersonation {
 
     } finally {
       // There is no drop table, we need to delete the table directory through FileSystem object
-      final FileSystem fs = dfsCluster.getFileSystem();
       final Path tablePath = new Path(Path.SEPARATOR + tableWS + Path.SEPARATOR + tableName);
       if (fs.exists(tablePath)) {
         fs.delete(tablePath, true);

http://git-wip-us.apache.org/repos/asf/drill/blob/c1b847ac/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
index 14392c9..f00dc55 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
@@ -17,20 +17,15 @@
  */
 package org.apache.drill.exec.impersonation;
 
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import org.apache.drill.common.exceptions.UserRemoteException;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.dotdrill.DotDrillType;
-import org.apache.drill.exec.rpc.RpcException;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -47,47 +42,15 @@ import static org.junit.Assert.assertThat;
  * a nested view.
  */
 public class TestImpersonationQueries extends BaseTestImpersonation {
-  private static final String MINIDFS_STORAGE_PLUGIN_NAME = "minidfs" + TestImpersonationQueries.class.getSimpleName();
-
-  private static final String[] org1Users = { "user0_1", "user1_1", "user2_1", "user3_1", "user4_1", "user5_1" };
-  private static final String[] org1Groups = { "group0_1", "group1_1", "group2_1", "group3_1", "group4_1", "group5_1" };
-  private static final String[] org2Users = { "user0_2", "user1_2", "user2_2", "user3_2", "user4_2", "user5_2" };
-  private static final String[] org2Groups = { "group0_2", "group1_2", "group2_2", "group3_2", "group4_2", "group5_2" };
-
-  static {
-    // "user0_1" belongs to "groups0_1". From "user1_1" onwards each user 
belongs to corresponding group and the group
-    // before it, i.e "user1_1" belongs to "group1_1" and "group0_1" and so on.
-    UserGroupInformation.createUserForTesting(org1Users[0], new String[] { 
org1Groups[0] });
-    for(int i=1; i<org1Users.length; i++) {
-      UserGroupInformation.createUserForTesting(org1Users[i], new String[] { 
org1Groups[i], org1Groups[i-1] });
-    }
-
-    UserGroupInformation.createUserForTesting(org2Users[0], new String[] { 
org2Groups[0] });
-    for(int i=1; i<org2Users.length; i++) {
-      UserGroupInformation.createUserForTesting(org2Users[i], new String[] { 
org2Groups[i], org2Groups[i-1] });
-    }
-  }
-
   @BeforeClass
-  public static void addMiniDfsBasedStorageAndGenerateTestData() throws Exception {
+  public static void setup() throws Exception {
     startMiniDfsCluster(TestImpersonationQueries.class.getSimpleName());
+    startDrillCluster(true);
+    addMiniDfsBasedStorage(createTestWorkspaces());
+    createTestData();
+  }
 
-    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
-    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs").getConfig();
-
-    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
-    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
-
-    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
-
-    createTestWorkspaces(workspaces);
-
-    miniDfsPluginConfig.workspaces = workspaces;
-    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
-    miniDfsPluginConfig.setEnabled(true);
-
-    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
-
+  private static void createTestData() throws Exception {
     // Create test tables/views
 
     // Create copy of "lineitem" table in /user/user0_1 owned by 
user0_1:group0_1 with permissions 750. Only user0_1
@@ -111,26 +74,29 @@ public class TestImpersonationQueries extends BaseTestImpersonation {
     return MINIDFS_STORAGE_PLUGIN_NAME + "." + user;
   }
 
-  private static void createTestWorkspaces(Map<String, WorkspaceConfig> workspaces) throws Exception {
+  private static Map<String, WorkspaceConfig> createTestWorkspaces() throws Exception {
     // Create "/tmp" folder and set permissions to "777"
-    final FileSystem fs = dfsCluster.getFileSystem();
     final Path tmpPath = new Path("/tmp");
     fs.delete(tmpPath, true);
     FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));
 
+    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap();
+
     // create user directory (ex. "/user/user0_1", with ownership 
"user0_1:group0_1" and perms 755) for every user.
     for(int i=0; i<org1Users.length; i++) {
       final String user = org1Users[i];
       final String group = org1Groups[i];
-      createAndAddWorkspace(fs, user, getUserHome(user), (short)0755, user, group, workspaces);
+      createAndAddWorkspace(user, getUserHome(user), (short)0755, user, group, workspaces);
     }
 
     // create user directory (ex. "/user/user0_2", with ownership 
"user0_2:group0_2" and perms 755) for every user.
     for(int i=0; i<org2Users.length; i++) {
       final String user = org2Users[i];
       final String group = org2Groups[i];
-      createAndAddWorkspace(fs, user, getUserHome(user), (short)0755, user, group, workspaces);
+      createAndAddWorkspace(user, getUserHome(user), (short)0755, user, group, workspaces);
     }
+
+    return workspaces;
   }
 
   private static void createTestTable(String user, String group, String tableName) throws Exception {
@@ -141,7 +107,6 @@ public class TestImpersonationQueries extends BaseTestImpersonation {
     // Change the ownership and permissions manually. Currently there is no option to specify the default permissions
     // and ownership for new tables.
     final Path tablePath = new Path(getUserHome(user), tableName);
-    final FileSystem fs = dfsCluster.getFileSystem();
 
     fs.setOwner(tablePath, user, group);
     fs.setPermission(tablePath, new FsPermission((short)0750));
@@ -208,7 +173,7 @@ public class TestImpersonationQueries extends BaseTestImpersonation {
 
     // Verify the view file created has the expected permissions and ownership
     Path viewFilePath = new Path(getUserHome(viewOwner), newViewName + DotDrillType.VIEW.getEnding());
-    FileStatus status = dfsCluster.getFileSystem().getFileStatus(viewFilePath);
+    FileStatus status = fs.getFileStatus(viewFilePath);
     assertEquals(viewGroup, status.getGroup());
     assertEquals(viewOwner, status.getOwner());
     assertEquals(viewPerms, status.getPermission().toShort());
@@ -244,7 +209,8 @@ public class TestImpersonationQueries extends BaseTestImpersonation {
 
     assertNotNull("UserRemoteException is expected", ex);
     assertThat(ex.getMessage(), containsString("PERMISSION ERROR: " +
-            "Not authorized to read table [lineitem] in schema 
[minidfsTestImpersonationQueries.user0_1]"));
+            String.format("Not authorized to read table [lineitem] in schema 
[%s.user0_1]",
+                MINIDFS_STORAGE_PLUGIN_NAME)));
   }
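
The last hunk builds the expected error message from MINIDFS_STORAGE_PLUGIN_NAME instead of hard-coding the old plugin name, so renaming the plugin cannot silently break the assertion. A minimal JUnit/Hamcrest sketch of the same idea (message text abbreviated):

import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;

public class MessageAssertionSketch {
  static final String PLUGIN_NAME = "miniDfsPlugin"; // mirrors MINIDFS_STORAGE_PLUGIN_NAME

  public static void main(String[] args) {
    String actual =
        "PERMISSION ERROR: Not authorized to read table [lineitem] in schema [miniDfsPlugin.user0_1]";

    // Build the expectation from the shared constant instead of a string literal.
    assertThat(actual, containsString(String.format(
        "Not authorized to read table [lineitem] in schema [%s.user0_1]", PLUGIN_NAME)));
  }
}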
 
 
