ilooner closed pull request #1477: DRILL-6753: Fix show files command to return result the same way as before
URL: https://github.com/apache/drill/pull/1477
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
index c9bac32f21e..75abfdd65be 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
@@ -19,53 +19,39 @@
 
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlLiteral;
 import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlNodeList;
-import org.apache.calcite.sql.SqlSelect;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.util.Util;
 import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.physical.PhysicalPlan;
+import org.apache.drill.exec.planner.sql.DirectPlan;
 import org.apache.drill.exec.planner.sql.SchemaUtilites;
-import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
 import org.apache.drill.exec.planner.sql.parser.SqlShowFiles;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.dfs.WorkspaceSchemaFactory.WorkspaceSchema;
-import org.apache.drill.exec.store.ischema.InfoSchemaTableType;
+import org.apache.drill.exec.store.ischema.Records;
+import org.apache.drill.exec.util.FileSystemUtil;
 import org.apache.drill.exec.work.foreman.ForemanSetupException;
+import org.apache.hadoop.fs.Path;
 
-import java.util.Arrays;
-import java.util.Collections;
+import java.io.IOException;
+import java.sql.Timestamp;
 import java.util.List;
-
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_RELATIVE_PATH;
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_SCHEMA_NAME;
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.IS_SCHEMA_NAME;
-
+import java.util.stream.Collectors;
 
 public class ShowFilesHandler extends DefaultSqlHandler {
+
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SetOptionHandler.class);
 
   public ShowFilesHandler(SqlHandlerConfig config) {
     super(config);
   }
 
-  /** Rewrite the parse tree as SELECT ... FROM INFORMATION_SCHEMA.FILES ... */
   @Override
-  public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
-
-    List<SqlNode> selectList = Collections.singletonList(SqlIdentifier.star(SqlParserPos.ZERO));
-
-    SqlNode fromClause = new SqlIdentifier(Arrays.asList(IS_SCHEMA_NAME, InfoSchemaTableType.FILES.name()), SqlParserPos.ZERO);
-
+  public PhysicalPlan getPlan(SqlNode sqlNode) throws ForemanSetupException, IOException {
     SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
     SchemaPlus drillSchema = defaultSchema;
-
     SqlShowFiles showFiles = unwrap(sqlNode, SqlShowFiles.class);
     SqlIdentifier from = showFiles.getDb();
-    boolean addRelativePathLikeClause = false;
+    String fromDir = "./";
 
     // Show files can be used without from clause, in which case we display the files in the default schema
     if (from != null) {
@@ -75,7 +61,8 @@ public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
       if (drillSchema == null) {
         // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
         drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
-        addRelativePathLikeClause = true;
+        // Listing for specific directory: show files in dfs.tmp.specific_directory
+        fromDir = fromDir + from.names.get((from.names.size() - 1));
       }
 
       if (drillSchema == null) {
@@ -85,11 +72,9 @@ public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
       }
     }
 
-    String fullSchemaName;
-
+    WorkspaceSchema wsSchema;
     try {
-      WorkspaceSchema wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
-      fullSchemaName = wsSchema.getFullSchemaName();
+      wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
     } catch (ClassCastException e) {
       throw UserException.validationError()
           .message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
@@ -97,28 +82,43 @@ public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
           .build(logger);
     }
 
-    SqlNode whereClause = DrillParserUtil.createCondition(new SqlIdentifier(FILES_COL_SCHEMA_NAME, SqlParserPos.ZERO),
-        SqlStdOperatorTable.EQUALS, SqlLiteral.createCharString(fullSchemaName, SqlParserPos.ZERO));
+    Path path = new Path(wsSchema.getDefaultLocation(), fromDir);
+    List<ShowFilesCommandResult> records = FileSystemUtil.listAll(wsSchema.getFS(), path, false).stream()
+        // use ShowFilesCommandResult for backward compatibility
+        .map(fileStatus -> new ShowFilesCommandResult(new Records.File(wsSchema.getFullSchemaName(), wsSchema, fileStatus)))
+        .collect(Collectors.toList());
 
-    // listing for specific directory: show files in dfs.tmp.specific_directory
-    if (addRelativePathLikeClause) {
-      if (!context.getOptions().getBoolean(ExecConstants.LIST_FILES_RECURSIVELY)) {
-        throw UserException.validationError()
-            .message("To SHOW FILES in specific directory, enable option %s", ExecConstants.LIST_FILES_RECURSIVELY)
-            .build(logger);
-      }
-
-      // like clause: relative_path like 'specific_directory/%'
-      String folderPath = from.names.get(from.names.size() - 1);
-      folderPath = folderPath.endsWith("/") ? folderPath : folderPath + "/";
-      SqlNode likeLiteral = SqlLiteral.createCharString(folderPath + "%", Util.getDefaultCharset().name(), SqlParserPos.ZERO);
-      SqlNode likeClause = DrillParserUtil.createCondition(new SqlIdentifier(FILES_COL_RELATIVE_PATH, SqlParserPos.ZERO),
-          SqlStdOperatorTable.LIKE, likeLiteral);
+    return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), records, ShowFilesCommandResult.class);
+  }
 
-      whereClause = DrillParserUtil.createCondition(whereClause, SqlStdOperatorTable.AND, likeClause);
+  /**
+   * Original show files command result holder is used as wrapper over new {@link Records.File} holder
+   * to maintain backward compatibility with ODBC driver etc. in column names and types.
+   */
+  public static class ShowFilesCommandResult {
+
+    public final String name;
+    public final boolean isDirectory;
+    public final boolean isFile;
+    public final long length;
+    public final String owner;
+    public final String group;
+    public final String permissions;
+    public final Timestamp accessTime;
+    public final Timestamp modificationTime;
+
+    public ShowFilesCommandResult(Records.File fileRecord) {
+      this.name = fileRecord.FILE_NAME;
+      this.isDirectory = fileRecord.IS_DIRECTORY;
+      this.isFile = fileRecord.IS_FILE;
+      this.length = fileRecord.LENGTH;
+      this.owner = fileRecord.OWNER;
+      this.group = fileRecord.GROUP;
+      this.permissions = fileRecord.PERMISSION;
+      this.accessTime = fileRecord.ACCESS_TIME;
+      this.modificationTime = fileRecord.MODIFICATION_TIME;
     }
 
-    return new SqlSelect(SqlParserPos.ZERO, null, new SqlNodeList(selectList, SqlParserPos.ZERO), fromClause, whereClause,
-        null, null, null, null, null, null);
   }
+
 }
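
For context on the handler change above: SHOW FILES is no longer rewritten into a
SELECT over INFORMATION_SCHEMA.FILES; the handler now lists the workspace directory
directly and returns the rows through a DirectPlan, wrapping each FileStatus in
ShowFilesCommandResult for backward compatibility. A minimal standalone sketch of
the listing step, using the plain Hadoop FileSystem API in place of Drill's
FileSystemUtil helper (the class name and directory below are illustrative, not
from the PR):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.util.Arrays;

public class ListWorkspaceSketch {
  public static void main(String[] args) throws IOException {
    // Local file system stands in for the workspace file system (wsSchema.getFS()).
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Stand-in for new Path(wsSchema.getDefaultLocation(), fromDir).
    Path dir = new Path(System.getProperty("user.dir"));
    // Non-recursive listing, analogous to FileSystemUtil.listAll(fs, path, false).
    Arrays.stream(fs.listStatus(dir))
        .forEach(status -> System.out.printf("%s dir=%b len=%d%n",
            status.getPath().getName(), status.isDirectory(), status.getLen()));
  }
}
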
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConstants.java
index 63e19d3ffbb..4c07f306e1c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConstants.java
@@ -100,5 +100,6 @@
    String FILES_COL_OWNER = "OWNER";
    String FILES_COL_GROUP = "GROUP";
    String FILES_COL_PERMISSION = "PERMISSION";
+   String FILES_COL_ACCESS_TIME = "ACCESS_TIME";
    String FILES_COL_MODIFICATION_TIME = "MODIFICATION_TIME";
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
index 1f99c0e3773..e09942edba0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
@@ -33,6 +33,7 @@
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_NUMERIC_PRECISION_RADIX;
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_NUMERIC_SCALE;
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_ORDINAL_POSITION;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_ACCESS_TIME;
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_FILE_NAME;
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_GROUP;
 import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.FILES_COL_IS_DIRECTORY;
@@ -90,6 +91,7 @@ public static Field create(String name, MajorType type) {
   public static final MajorType BIGINT = Types.required(MinorType.BIGINT);
   public static final MajorType VARCHAR = Types.required(MinorType.VARCHAR);
   public static final MajorType BIT = Types.required(MinorType.BIT);
+  public static final MajorType TIMESTAMP = Types.required(MinorType.TIMESTAMP);
 
   private final List<Field> fields;
 
@@ -121,8 +123,11 @@ private RelDataType getRelDataType(RelDataTypeFactory typeFactory, MajorType typ
         return typeFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE);
       case BIT:
         return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
+      case TIMESTAMP:
+        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
       default:
-        throw new UnsupportedOperationException("Only INT, BIGINT, VARCHAR and BOOLEAN types are supported in " + InfoSchemaConstants.IS_SCHEMA_NAME);
+        throw new UnsupportedOperationException("Only INT, BIGINT, VARCHAR, BOOLEAN and TIMESTAMP types are supported in " +
+            InfoSchemaConstants.IS_SCHEMA_NAME);
     }
   }
 
@@ -275,7 +280,8 @@ public Columns() {
         Field.create(FILES_COL_OWNER, VARCHAR),
         Field.create(FILES_COL_GROUP, VARCHAR),
         Field.create(FILES_COL_PERMISSION, VARCHAR),
-        Field.create(FILES_COL_MODIFICATION_TIME, VARCHAR)
+        Field.create(FILES_COL_ACCESS_TIME, TIMESTAMP),
+        Field.create(FILES_COL_MODIFICATION_TIME, TIMESTAMP)
     );
 
     public Files() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
index 086aa0d82ea..5608560a2d2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
@@ -31,9 +31,10 @@
 
 import org.apache.drill.shaded.guava.com.google.common.base.MoreObjects;
 
+import java.sql.Timestamp;
 import java.time.Instant;
+import java.time.ZoneId;
 import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
 
 public class Records {
 
@@ -565,7 +566,8 @@ public Schema(String catalog, String name, String owner, String type, boolean is
     public final String OWNER;
     public final String GROUP;
     public final String PERMISSION;
-    public final String MODIFICATION_TIME;
+    public final Timestamp ACCESS_TIME;
+    public final Timestamp MODIFICATION_TIME;
 
     public File(String schemaName, WorkspaceSchemaFactory.WorkspaceSchema wsSchema, FileStatus fileStatus) {
       this.SCHEMA_NAME = schemaName;
@@ -580,9 +582,22 @@ public File(String schemaName, WorkspaceSchemaFactory.WorkspaceSchema wsSchema,
       this.OWNER = fileStatus.getOwner();
       this.GROUP = fileStatus.getGroup();
       this.PERMISSION = fileStatus.getPermission().toString();
-      this.MODIFICATION_TIME = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS")
-          .withZone(ZoneOffset.UTC)
-          .format(Instant.ofEpochMilli(fileStatus.getModificationTime()));
+      this.ACCESS_TIME = getTimestampWithReplacedZone(fileStatus.getAccessTime());
+      this.MODIFICATION_TIME = getTimestampWithReplacedZone(fileStatus.getModificationTime());
+    }
+
+    /**
+     * Convert milliseconds into sql timestamp.
+     * Get the timestamp in UTC because Drill's internal TIMESTAMP stores time in UTC.
+     *
+     * @param ms milliseconds
+     * @return sql timestamp instance
+     */
+    private Timestamp getTimestampWithReplacedZone(long ms) {
+      return Timestamp.from(Instant.ofEpochMilli(ms)
+          .atZone(ZoneId.systemDefault())
+          .withZoneSameLocal(ZoneOffset.UTC)
+          .toInstant());
     }
   }
 }
\ No newline at end of file
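
A note on the getTimestampWithReplacedZone conversion above: Drill's internal
TIMESTAMP type stores time in UTC, so the file's epoch milliseconds are first
interpreted in the server's default zone and the resulting wall-clock time is then
re-labelled as UTC. A small self-contained sketch of the same steps (the class name
and the sample value are illustrative, not from the PR):

import java.sql.Timestamp;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZoneOffset;

public class TimestampZoneSketch {
  // Same conversion as Records.File#getTimestampWithReplacedZone above.
  static Timestamp getTimestampWithReplacedZone(long ms) {
    return Timestamp.from(Instant.ofEpochMilli(ms)
        .atZone(ZoneId.systemDefault())    // local wall-clock time of the file event
        .withZoneSameLocal(ZoneOffset.UTC) // keep the wall clock, relabel the zone as UTC
        .toInstant());
  }

  public static void main(String[] args) {
    long modificationTime = System.currentTimeMillis();
    System.out.println(getTimestampWithReplacedZone(modificationTime));
  }
}
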
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
index fd747bbcbbb..9acd0154505 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
@@ -23,7 +23,6 @@
 import org.apache.drill.categories.UnlikelyTest;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.exceptions.UserRemoteException;
-import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 import org.apache.drill.categories.SlowTest;
 import org.apache.hadoop.fs.FileSystem;
@@ -33,15 +32,14 @@
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.ExpectedException;
 
 import java.util.Map;
 
 import static org.hamcrest.core.StringContains.containsString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -60,6 +58,9 @@
     UserGroupInformation.createUserForTesting(user2, new String[]{ group1 });
   }
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
   @BeforeClass
   public static void setup() throws Exception {
     startMiniDfsCluster(TestImpersonationMetadata.class.getSimpleName());
@@ -176,14 +177,11 @@ public void testShowFilesInWSWithOtherPermissionsForQueryUser() throws Exception
   public void testShowFilesInWSWithNoPermissionsForQueryUser() throws Exception {
     updateClient(user2);
 
-    try {
-      setSessionOption(ExecConstants.LIST_FILES_RECURSIVELY, true);
-      // Try show tables in schema "drill_test_grp_1_700" which is owned by "user1"
-      int count = testSql(String.format("SHOW FILES IN %s.drill_test_grp_1_700", MINI_DFS_STORAGE_PLUGIN_NAME));
-      assertEquals("Counts should match", 0, count);
-    } finally {
-      resetSessionOption(ExecConstants.LIST_FILES_RECURSIVELY);
-    }
+    thrown.expect(UserRemoteException.class);
+    thrown.expectMessage(containsString("Permission denied: user=drillTestUser2, " +
+        "access=READ_EXECUTE, inode=\"/drill_test_grp_1_700\":drillTestUser1:drill_test_grp_1:drwx------"));
+    // Try show tables in schema "drill_test_grp_1_700" which is owned by "user1"
+    test("SHOW FILES IN %s.drill_test_grp_1_700", MINI_DFS_STORAGE_PLUGIN_NAME);
   }
 
   @Test
@@ -345,25 +343,18 @@ private static void testCreateTableTestHelper(String user, String tableWS,
   @Test
   public void testCreateTableInWSWithNoPermissionsForQueryUser() throws Exception {
     // Workspace dir owned by "processUser", workspace group is "group0" and "user2" is not part of "group0"
-    final String tableWS = "drill_test_grp_0_755";
-    final String tableName = "table1";
-
-    UserRemoteException ex = null;
+    String tableWS = "drill_test_grp_0_755";
+    String tableName = "table1";
 
-    try {
-      updateClient(user2);
-
-      test("USE " + Joiner.on(".").join(MINI_DFS_STORAGE_PLUGIN_NAME, tableWS));
+    updateClient(user2);
+    test("use %s.`%s`", MINI_DFS_STORAGE_PLUGIN_NAME, tableWS);
 
-      test("CREATE TABLE " + tableName + " AS SELECT " +
-          "c_custkey, c_nationkey FROM cp.`tpch/customer.parquet` ORDER BY c_custkey;");
-    } catch(UserRemoteException e) {
-      ex = e;
-    }
+    thrown.expect(UserRemoteException.class);
+    thrown.expectMessage(containsString("Permission denied: user=drillTestUser2, " +
+        "access=WRITE, inode=\"/drill_test_grp_0_755/"));
 
-    assertNotNull("UserRemoteException is expected", ex);
-    assertThat(ex.getMessage(),
-        containsString("SYSTEM ERROR: RemoteException: Permission denied: user=drillTestUser2, access=WRITE, inode=\"/drill_test_grp_0_755/"));
+    test("CREATE TABLE %s AS SELECT c_custkey, c_nationkey " +
+        "FROM cp.`tpch/customer.parquet` ORDER BY c_custkey", tableName);
   }
 
   @Test
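
The test refactoring above replaces try/catch-based exception assertions with
JUnit 4's ExpectedException rule. A minimal standalone sketch of that pattern
(the test class, exception, and message here are illustrative, not from the PR):

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import static org.hamcrest.core.StringContains.containsString;

public class ExpectedExceptionSketch {
  @Rule
  public ExpectedException thrown = ExpectedException.none();

  @Test
  public void failsWithExpectedMessage() {
    // Expectations must be declared before the statement that throws.
    thrown.expect(IllegalStateException.class);
    thrown.expectMessage(containsString("Permission denied"));
    throw new IllegalStateException("Permission denied: user=someUser");
  }
}
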
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestInfoSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestInfoSchema.java
index e295eeee7e3..ee89c5c409d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestInfoSchema.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestInfoSchema.java
@@ -49,7 +49,6 @@
  * -- InformationSchema
 * -- Queries on InformationSchema such as SHOW TABLES, SHOW SCHEMAS or DESCRIBE table
  * -- USE schema
- * -- SHOW FILES
  */
 @Category(SqlTest.class)
 public class TestInfoSchema extends BaseTestQuery {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestFilesTable.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestFilesTable.java
index f8ea9a13d3e..35b0cbc0285 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestFilesTable.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestFilesTable.java
@@ -18,11 +18,11 @@
 package org.apache.drill.exec.store.ischema;
 
 import org.apache.drill.categories.SqlTest;
-import org.apache.drill.common.exceptions.UserRemoteException;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterFixtureBuilder;
 import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.QueryBuilder;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -31,6 +31,7 @@
 
 import java.io.File;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
@@ -45,11 +46,22 @@ public static void setup() throws Exception {
     // create one workspace named files
     File filesWorkspace = cluster.makeDataDir("files", null, null);
 
-    // add data to the workspace: one file and folder with one file
-    assertTrue(new File(filesWorkspace, "file1.txt").createNewFile());
-    File folder = new File(filesWorkspace, "folder");
-    assertTrue(folder.mkdir());
-    assertTrue(new File(folder, "file2.txt").createNewFile());
+    /*
+      Add data to the workspace:
+       ../files
+       ../files/file0.txt
+       ../files/folder1
+       ../files/folder1/file1.txt
+       ../files/folder1/folder2
+       ../files/folder1/folder2/file2.txt
+     */
+    assertTrue(new File(filesWorkspace, "file0.txt").createNewFile());
+    File folder1 = new File(filesWorkspace, "folder1");
+    assertTrue(folder1.mkdir());
+    assertTrue(new File(folder1, "file1.txt").createNewFile());
+    File folder2 = new File(folder1, "folder2");
+    assertTrue(folder2.mkdir());
+    assertTrue(new File(folder2, "file2.txt").createNewFile());
   }
 
   @Rule
@@ -58,11 +70,12 @@ public static void setup() throws Exception {
   @Test
   public void testSelectWithoutRecursion() throws Exception {
     client.testBuilder()
-        .sqlQuery("select schema_name, root_schema_name, workspace_name, file_name, relative_path, is_directory, is_file from INFORMATION_SCHEMA.`FILES`")
+        .sqlQuery("select schema_name, root_schema_name, workspace_name, file_name, relative_path, is_directory, is_file " +
+            "from INFORMATION_SCHEMA.`FILES`")
         .unOrdered()
         .baselineColumns("schema_name", "root_schema_name", "workspace_name", "file_name", "relative_path", "is_directory", "is_file")
-        .baselineValues("dfs.files", "dfs", "files", "file1.txt", "file1.txt", false, true)
-        .baselineValues("dfs.files", "dfs", "files", "folder", "folder", true, false)
+        .baselineValues("dfs.files", "dfs", "files", "file0.txt", "file0.txt", false, true)
+        .baselineValues("dfs.files", "dfs", "files", "folder1", "folder1", true, false)
         .go();
   }
 
@@ -71,12 +84,15 @@ public void testSelectWithRecursion() throws Exception {
     try {
       client.alterSession(ExecConstants.LIST_FILES_RECURSIVELY, true);
       client.testBuilder()
-          .sqlQuery("select schema_name, root_schema_name, workspace_name, file_name, relative_path, is_directory, is_file from INFORMATION_SCHEMA.`FILES`")
+          .sqlQuery("select schema_name, root_schema_name, workspace_name, file_name, relative_path, is_directory, is_file " +
+              "from INFORMATION_SCHEMA.`FILES`")
           .unOrdered()
           .baselineColumns("schema_name", "root_schema_name", "workspace_name", "file_name", "relative_path", "is_directory", "is_file")
-          .baselineValues("dfs.files", "dfs", "files", "file1.txt", "file1.txt", false, true)
-          .baselineValues("dfs.files", "dfs", "files", "folder", "folder", true, false)
-          .baselineValues("dfs.files", "dfs", "files", "file2.txt", "folder/file2.txt", false, true)
+          .baselineValues("dfs.files", "dfs", "files", "file0.txt", "file0.txt", false, true)
+          .baselineValues("dfs.files", "dfs", "files", "folder1", "folder1", true, false)
+          .baselineValues("dfs.files", "dfs", "files", "file1.txt", "folder1/file1.txt", false, true)
+          .baselineValues("dfs.files", "dfs", "files", "folder2", "folder1/folder2", true, false)
+          .baselineValues("dfs.files", "dfs", "files", "file2.txt", "folder1/folder2/file2.txt", false, true)
           .go();
     } finally {
       client.resetSession(ExecConstants.LIST_FILES_RECURSIVELY);
@@ -86,62 +102,47 @@ public void testSelectWithRecursion() throws Exception {
 
   @Test
   public void testShowFilesWithInCondition() throws Exception {
-    client.testBuilder()
-        .sqlQuery("show files in dfs.`files`")
-        .unOrdered()
-        .sqlBaselineQuery("select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'")
-        .go();
+    checkCounts("show files in dfs.`files`",
+        "select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'");
   }
 
   @Test
-  public void testShowFilesForSpecificFolderSuccess() throws Exception {
+  public void testShowFilesForSpecificDirectory() throws Exception {
     try {
+      client.alterSession(ExecConstants.LIST_FILES_RECURSIVELY, false);
+      QueryBuilder queryBuilder = client.queryBuilder().sql("show files in dfs.`files`.folder1");
+      QueryBuilder.QuerySummary querySummary = queryBuilder.run();
+      assertTrue(querySummary.succeeded());
+      assertEquals(2, querySummary.recordCount());
+      // option has no effect
       client.alterSession(ExecConstants.LIST_FILES_RECURSIVELY, true);
-      client.testBuilder()
-          .sqlQuery("show files in dfs.`files`.folder")
-          .unOrdered()
-          .sqlBaselineQuery("select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files' and relative_path like 'folder/%'")
-          .go();
+      querySummary = queryBuilder.run();
+      assertTrue(querySummary.succeeded());
+      assertEquals(2, querySummary.recordCount());
     } finally {
       client.resetSession(ExecConstants.LIST_FILES_RECURSIVELY);
     }
   }
 
-  @Test
-  public void testShowFilesForSpecificFolderFailure() throws Exception {
-    thrown.expect(UserRemoteException.class);
-    thrown.expectMessage(String.format("To SHOW FILES in specific directory, enable option %s", ExecConstants.LIST_FILES_RECURSIVELY));
-    queryBuilder().sql("show files in dfs.`files`.folder").run();
-  }
-
   @Test
   public void testShowFilesWithUseClause() throws Exception {
     queryBuilder().sql("use dfs.`files`").run();
-    client.testBuilder()
-        .sqlQuery("show files")
-        .unOrdered()
-        .sqlBaselineQuery("select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'")
-        .go();
+    checkCounts("show files",
+        "select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'");
   }
 
   @Test
   public void testShowFilesWithPartialUseClause() throws Exception {
     queryBuilder().sql("use dfs").run();
-    client.testBuilder()
-        .sqlQuery("show files in `files`")
-        .unOrdered()
-        .sqlBaselineQuery("select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'")
-        .go();
+    checkCounts("show files in `files`",
+        "select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.files'");
   }
 
   @Test
   public void testShowFilesForDefaultSchema() throws Exception {
-    queryBuilder().sql("use dfs").run();
-    client.testBuilder()
-        .sqlQuery("show files")
-        .unOrdered()
-        .sqlBaselineQuery("select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.default'")
-        .go();
+    queryBuilder().sql("use dfs").run().succeeded();
+    checkCounts("show files",
+        "select * from INFORMATION_SCHEMA.`FILES` where schema_name = 'dfs.default'");
   }
 
   @Test
@@ -165,5 +166,13 @@ public void testFilterPushDown_Full() throws Exception {
     assertFalse(plan.contains("Filter(condition="));
   }
 
+  private void checkCounts(String testQuery, String baseQuery) throws Exception {
+    QueryBuilder.QuerySummary testQuerySummary = queryBuilder().sql(testQuery).run();
+    assertTrue(testQuerySummary.succeeded());
+    QueryBuilder.QuerySummary baseQuerySummary = queryBuilder().sql(baseQuery).run();
+    assertTrue(baseQuerySummary.succeeded());
+    assertEquals(testQuerySummary.recordCount(), baseQuerySummary.recordCount());
+  }
+
 }
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
index 76c472c3023..de55f19e59f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
@@ -250,7 +250,7 @@ public void columns() throws Exception {
 
     assertEquals(RequestStatus.OK, resp.getStatus());
     List<ColumnMetadata> columns = resp.getColumnsList();
-    assertEquals(134, columns.size());
+    assertEquals(135, columns.size());
     // too many records to verify the output.
   }
 


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
