This is an automated email from the ASF dual-hosted git repository.
cgivre pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git
The following commit(s) were added to refs/heads/master by this push:
new b2aa1a9 DRILL-7707: Unable to analyze table metadata if it resides in non-writable workspace
b2aa1a9 is described below
commit b2aa1a94ffd2a7b4df9d9204a0736e0f08c68546
Author: Volodymyr Vysotskyi <[email protected]>
AuthorDate: Mon May 11 20:31:38 2020 +0300
DRILL-7707: Unable to analyze table metadata if it resides in non-writable workspace
---
.../org/apache/calcite/jdbc/DynamicRootSchema.java | 17 ++++++++--
.../drill/exec/planner/sql/SchemaUtilites.java | 4 +--
.../drill/exec/sql/TestMetastoreCommands.java | 36 ++++++++++++++++++++++
.../java/org/apache/drill/test/ClusterFixture.java | 16 ++++++++--
4 files changed, 66 insertions(+), 7 deletions(-)
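For context, the fix relaxes the schema mutability check on the ANALYZE path: collecting Metastore metadata only reads the table, so it should not require the target workspace to be writable. A minimal sketch of the statement this is meant to allow (the workspace name "readonly" and the table name are illustrative, not part of the patch):

    // Illustrative only: the kind of statement that previously failed with a
    // "Schema [...] is immutable" validation error when the target workspace
    // was registered with writable=false. ANALYZE populates the Drill
    // Metastore rather than writing into the workspace itself.
    String analyzeSql = "ANALYZE TABLE dfs.readonly.`alltypes_optional` REFRESH METADATA";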
diff --git a/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicRootSchema.java b/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicRootSchema.java
index 78421ab..0ae8f8c 100644
--- a/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicRootSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicRootSchema.java
@@ -22,11 +22,11 @@ import org.apache.calcite.DataContext;
import org.apache.calcite.linq4j.tree.Expression;
import org.apache.calcite.linq4j.tree.Expressions;
import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.util.BuiltInMethod;
import org.apache.drill.common.exceptions.UserException;
import org.apache.drill.common.exceptions.UserExceptionUtils;
import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.store.AbstractSchema;
import org.apache.drill.exec.store.SchemaConfig;
import org.apache.drill.exec.store.StoragePlugin;
import org.apache.drill.exec.store.StoragePluginRegistry;
@@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
/**
@@ -46,12 +47,14 @@ import java.util.List;
public class DynamicRootSchema extends DynamicSchema {
private static final Logger logger = LoggerFactory.getLogger(DynamicRootSchema.class);
+ private static final String ROOT_SCHEMA_NAME = "";
+
private final SchemaConfig schemaConfig;
private final StoragePluginRegistry storages;
/** Creates a root schema. */
DynamicRootSchema(StoragePluginRegistry storages, SchemaConfig schemaConfig) {
- super(null, new RootSchema(), "");
+ super(null, new RootSchema(), ROOT_SCHEMA_NAME);
this.schemaConfig = schemaConfig;
this.storages = storages;
}
@@ -131,6 +134,16 @@ public class DynamicRootSchema extends DynamicSchema {
}
static class RootSchema extends AbstractSchema {
+
+ public RootSchema() {
+ super(Collections.emptyList(), ROOT_SCHEMA_NAME);
+ }
+
+ @Override
+ public String getTypeName() {
+ return ROOT_SCHEMA_NAME;
+ }
+
@Override public Expression getExpression(SchemaPlus parentSchema, String name) {
return Expressions.call(
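The import swap above replaces Calcite's AbstractSchema with Drill's org.apache.drill.exec.store.AbstractSchema, so the in-memory root schema now behaves like any other Drill schema during planning. A minimal sketch of the effect, assuming Calcite's SchemaPlus#unwrap(Class); the variable name rootSchemaPlus is illustrative:

    // Sketch only: with RootSchema extending Drill's AbstractSchema, the root
    // can be unwrapped to a Drill schema the same way workspace schemas are
    // (as SchemaUtilites.unwrapAsDrillSchemaInstance does), instead of
    // tripping over a non-Drill schema later during validation.
    org.apache.drill.exec.store.AbstractSchema drillRoot =
        rootSchemaPlus.unwrap(org.apache.drill.exec.store.AbstractSchema.class);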
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
index 5c03c61..ed0dda3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
@@ -216,7 +216,7 @@ public class SchemaUtilites {
throwSchemaNotFoundException(defaultSchema, SCHEMA_PATH_JOINER.join(schemaPath));
}
- if (isRootSchema(schema)) {
+ if (checkMutable && isRootSchema(schema)) {
throw UserException.validationError()
.message("Root schema is immutable. Drill does not allow creating or
deleting tables or views in the root schema. " +
"Select a schema using 'USE schema' command.")
@@ -224,7 +224,7 @@ public class SchemaUtilites {
}
final AbstractSchema drillSchema = unwrapAsDrillSchemaInstance(schema);
- if (!drillSchema.isMutable()) {
+ if (checkMutable && !drillSchema.isMutable()) {
throw UserException.validationError()
.message("Unable to create or drop objects. Schema [%s] is
immutable.", getSchemaPath(schema))
.build(logger);
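Both guards above are now gated on checkMutable, so callers that resolve a schema only for reading (such as ANALYZE) can skip the writability requirement while CREATE/DROP paths keep passing true. A sketch of the resulting pattern follows; the method name resolveSchema and the helper lookupSchema are illustrative, while checkMutable, isRootSchema, unwrapAsDrillSchemaInstance, getSchemaPath, SCHEMA_PATH_JOINER, and throwSchemaNotFoundException appear in the hunks above:

    // Sketch only: existence of the schema is always validated, but mutability
    // is enforced only when the caller intends to create or drop objects.
    private static AbstractSchema resolveSchema(SchemaPlus defaultSchema,
        List<String> schemaPath, boolean checkMutable) {
      SchemaPlus schema = lookupSchema(defaultSchema, schemaPath); // hypothetical lookup helper
      if (schema == null) {
        throwSchemaNotFoundException(defaultSchema, SCHEMA_PATH_JOINER.join(schemaPath));
      }
      if (checkMutable && isRootSchema(schema)) {
        throw UserException.validationError()
            .message("Root schema is immutable. ...") // message shortened here
            .build(logger);
      }
      AbstractSchema drillSchema = unwrapAsDrillSchemaInstance(schema);
      if (checkMutable && !drillSchema.isMutable()) {
        throw UserException.validationError()
            .message("Unable to create or drop objects. Schema [%s] is immutable.", getSchemaPath(schema))
            .build(logger);
      }
      return drillSchema; // read-only callers get the schema without the mutability constraint
    }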
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestMetastoreCommands.java b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestMetastoreCommands.java
index 72989b3..e12722e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestMetastoreCommands.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestMetastoreCommands.java
@@ -73,11 +73,14 @@ import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
@Category({SlowTest.class, MetastoreTest.class})
public class TestMetastoreCommands extends ClusterTest {
@@ -3495,6 +3498,39 @@ public class TestMetastoreCommands extends ClusterTest {
run("analyze table table(dfs.tmp.`%s` (type => 'parquet',
autoCorrectCorruptDates => false, enableStringsSignedMinMax=>false)) DROP
METADATA", tableName);
}
+ @Test
+ public void testAnalyzeWithClassPathSystem() throws Exception {
+ try {
+ run("analyze table cp.`employee.json` refresh metadata");
+ fail();
+ } catch (UserRemoteException e) {
+ assertThat(e.getMessage(), containsString("ClassPathFileSystem doesn't currently support listing files"));
+ }
+ }
+
+ @Test
+ public void testAnalyzeWithRootSchema() throws Exception {
+ try {
+ run("analyze table t refresh metadata");
+ fail();
+ } catch (UserRemoteException e) {
+ assertThat(e.getMessage(), containsString("VALIDATION ERROR: No table with given name [t] exists in schema []"));
+ }
+ }
+
+ @Test
+ public void testAnalyzeWithNonWritableWorkspace() throws Exception {
+ String tableName = "alltypes_optional";
+ String workspaceName = "immutable";
+ File table = dirTestWatcher.copyResourceToRoot(
+ Paths.get("parquet", "alltypes_optional.parquet"),
Paths.get(workspaceName, tableName));
+
+ cluster.defineImmutableWorkspace("dfs", workspaceName,
+ table.getAbsoluteFile().getParent(), null, null);
+
+ run("analyze table dfs.%s.%s refresh metadata", workspaceName, tableName);
+ }
+
public static <T> ColumnStatistics<T> getColumnStatistics(T minValue, T maxValue,
long rowCount, TypeProtos.MinorType minorType) {
return new ColumnStatistics<>(
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
index 1ba053c..f6ba1df 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
@@ -481,9 +481,19 @@ public class ClusterFixture extends BaseFixture implements AutoCloseable {
public void defineWorkspace(String pluginName, String schemaName, String path,
String defaultFormat, FormatPluginConfig format) {
+ defineWorkspace(pluginName, schemaName, path, defaultFormat, format, true);
+ }
+
+ public void defineImmutableWorkspace(String pluginName, String schemaName, String path,
+ String defaultFormat, FormatPluginConfig format) {
+ defineWorkspace(pluginName, schemaName, path, defaultFormat, format, false);
+ }
+
+ private void defineWorkspace(String pluginName, String schemaName, String path,
+ String defaultFormat, FormatPluginConfig format, boolean writable) {
for (Drillbit bit : drillbits()) {
try {
- defineWorkspace(bit, pluginName, schemaName, path, defaultFormat, format);
+ defineWorkspace(bit, pluginName, schemaName, path, defaultFormat, format, writable);
} catch (PluginException e) {
// This functionality is supposed to work in tests. Change
// exception to unchecked to make test code simpler.
@@ -496,11 +506,11 @@ public class ClusterFixture extends BaseFixture implements AutoCloseable {
}
private void defineWorkspace(Drillbit drillbit, String pluginName,
- String schemaName, String path, String defaultFormat, FormatPluginConfig format)
+ String schemaName, String path, String defaultFormat, FormatPluginConfig format, boolean writable)
throws PluginException {
final StoragePluginRegistry pluginRegistry = drillbit.getContext().getStorage();
final FileSystemConfig pluginConfig = (FileSystemConfig) pluginRegistry.getStoredConfig(pluginName);
- final WorkspaceConfig newTmpWSConfig = new WorkspaceConfig(path, true, defaultFormat, false);
+ final WorkspaceConfig newTmpWSConfig = new WorkspaceConfig(path, writable, defaultFormat, false);
Map<String, WorkspaceConfig> newWorkspaces = new HashMap<>();
Optional.ofNullable(pluginConfig.getWorkspaces())
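The new writable flag flows straight into WorkspaceConfig, so an "immutable" test workspace is simply a dfs workspace registered as read-only. A minimal sketch using the constructor shape visible in the hunk above (the location and format values are placeholders, not part of the patch):

    // Sketch only: a read-only workspace, mirroring the constructor call above.
    // Argument order follows the patch: location, writable, default input
    // format, and the trailing boolean the existing code already passes as false.
    WorkspaceConfig readOnlyWorkspace = new WorkspaceConfig("/data/readonly", false, "parquet", false);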