vnhive commented on a change in pull request #2037:
URL: https://github.com/apache/hive/pull/2037#discussion_r598456248
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/desc/DescDataConnectorDesc.java
##########
@@ -0,0 +1,71 @@
+/*
Review comment:
Just a funny observation that requires no action: I noticed that the name
of the file is **Desc**DataConnector**Desc**.
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/alter/AbstractAlterDataConnectorAnalyzer.java
##########
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.alter;
+
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for data connector alteration commands.
+ */
+public abstract class AbstractAlterDataConnectorAnalyzer extends
BaseSemanticAnalyzer {
+ public AbstractAlterDataConnectorAnalyzer(QueryState queryState) throws
SemanticException {
+ super(queryState);
+ }
+
+ protected void addAlterDataConnectorDesc(AbstractAlterDataConnectorDesc
alterDesc) throws SemanticException {
+ DataConnector connector = getDataConnector(alterDesc.getConnectorName());
Review comment:
Do we have to check whether the connector object returned here is null? Can you
please verify that this is being done in the other analyzer classes?
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/drop/DropDataConnectorOperation.java
##########
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.drop;
+
+import org.apache.hadoop.hive.llap.LlapHiveUtils;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+/**
+ * Operation process of dropping a data connector.
+ */
+public class DropDataConnectorOperation extends
DDLOperation<DropDataConnectorDesc> {
+ public DropDataConnectorOperation(DDLOperationContext context,
DropDataConnectorDesc desc) {
+ super(context, desc);
+ }
+
+ @Override
+ public int execute() throws HiveException {
+ try {
+ String dcName = desc.getConnectorName();
+ ReplicationSpec replicationSpec = desc.getReplicationSpec();
+ if (replicationSpec.isInReplicationScope()) {
+ DataConnector connector = context.getDb().getDataConnector(dcName);
+ if (connector == null ||
!replicationSpec.allowEventReplacementInto(connector.getParameters())) {
+ return 0;
Review comment:
The return value is 0 both when the connector is not found and when the
execute method succeeds at line 63. Should you be throwing an
exception in one of these cases?
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/drop/DropDataConnectorAnalyzer.java
##########
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.drop;
+
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for data connector dropping commands.
+ */
+@DDLType(types = HiveParser.TOK_DROPDATACONNECTOR)
+public class DropDataConnectorAnalyzer extends BaseSemanticAnalyzer {
+ public DropDataConnectorAnalyzer(QueryState queryState) throws
SemanticException {
+ super(queryState);
+ }
+
+ @Override
+ public void analyzeInternal(ASTNode root) throws SemanticException {
+ String connectorName = unescapeIdentifier(root.getChild(0).getText());
+ boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) !=
null;
+
+ DataConnector connector = getDataConnector(connectorName, !ifExists);
+ if (connector == null) {
+ return;
Review comment:
Don't we have to throw the following exception here?

    if (connector == null) {
      throw new HiveException(ErrorMsg.DATACONNECTOR_NOT_EXISTS, desc.getConnectorName());
    }
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on
hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX +
".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX +
".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX +
".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX +
".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX +
".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX +
".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX +
".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX +
".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX +
".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX +
".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX +
".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX +
".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
Review comment:
Please also wrap the ClassNotFoundException here to retain the exception
history.
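A minimal sketch of what that could look like, reusing the catch block quoted above and passing the caught exception as the cause:

    } catch (ClassNotFoundException cnfe) {
      // keep the original exception as the cause so the stack trace is not lost
      LOG.warn("Driver class not found in classpath: " + driverClassName, cnfe);
      throw new RuntimeException("Driver class not found: " + driverClassName, cnfe);
    }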
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on
hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX +
".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX +
".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX +
".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX +
".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX +
".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX +
".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX +
".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX +
".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX +
".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX +
".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX +
".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX +
".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not connect to remote data source at " + jdbcUrl);
+ throw new ConnectException("Could not connect to remote datasource at "
+ jdbcUrl + ",cause:" + sqle.getMessage());
Review comment:
Please also add the SQLException here to retain the exception history.
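One possible sketch: java.net.ConnectException has no (message, cause) constructor, so the cause would have to be attached via initCause before throwing:

    } catch (SQLException sqle) {
      LOG.warn("Could not connect to remote data source at " + jdbcUrl, sqle);
      // ConnectException only accepts a message, so attach the cause explicitly
      ConnectException ce = new ConnectException("Could not connect to remote datasource at " + jdbcUrl);
      ce.initCause(sqle);
      throw ce;
    }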
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/desc/DescDataConnectorFormatter.java
##########
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.desc;
+
+import org.apache.commons.collections.MapUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.ql.ddl.ShowUtils;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.formatting.MapBuilder;
+import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
+import org.apache.hive.common.util.HiveStringUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Map;
+
+/**
+ * Formats DESC CONNECTOR results.
+ */
+abstract class DescDataConnectorFormatter {
+ static DescDataConnectorFormatter getFormatter(HiveConf conf) {
+ if (MetaDataFormatUtils.isJson(conf)) {
+ return new JsonDescDataConnectorFormatter();
+ } else {
+ return new TextDescDataConnectorFormatter();
+ }
+ }
+
+ abstract void showDataConnectorDescription(DataOutputStream out, String
connector, String type, String url,
+ String ownerName, PrincipalType ownerType, String comment, Map<String,
String> params)
+ throws HiveException;
+
+ // ------ Implementations ------
+
+ static class JsonDescDataConnectorFormatter extends
DescDataConnectorFormatter {
+ @Override
+ void showDataConnectorDescription(DataOutputStream out, String connector,
String type, String url,
+ String ownerName, PrincipalType ownerType, String comment, Map<String,
String> params)
+ throws HiveException {
+ MapBuilder builder = MapBuilder.create()
+ .put("connector", connector)
+ .put("type", type)
+ .put("url", url);
+ if (ownerName != null) {
+ builder.put("owner", ownerName);
+ }
+ if (ownerType != null) {
+ builder.put("ownerType", ownerType.name());
+ }
+ if (comment != null) {
+ builder.put("comment", comment);
+ }
+ if (MapUtils.isNotEmpty(params)) {
+ builder.put("params", params);
+ }
+ ShowUtils.asJson(out, builder.build());
+ }
+ }
+
+ static class TextDescDataConnectorFormatter extends
DescDataConnectorFormatter {
+ @Override
+ void showDataConnectorDescription(DataOutputStream out, String connector,
String type, String url,
+ String ownerName, PrincipalType ownerType, String comment, Map<String,
String> params)
+ throws HiveException {
+ try {
+ out.write(connector.getBytes(StandardCharsets.UTF_8));
+ out.write(Utilities.tabCode);
+ if (type != null) {
+ out.write(type.getBytes(StandardCharsets.UTF_8));
+ }
+ out.write(Utilities.tabCode);
+ if (url != null) {
+ out.write(url.getBytes(StandardCharsets.UTF_8));
+ }
+ out.write(Utilities.tabCode);
+ if (ownerName != null) {
+ out.write(ownerName.getBytes(StandardCharsets.UTF_8));
+ }
+ out.write(Utilities.tabCode);
+ if (ownerType != null) {
+ out.write(ownerType.name().getBytes(StandardCharsets.UTF_8));
+ }
+ out.write(Utilities.tabCode);
+ if (comment != null) {
+
out.write(HiveStringUtils.escapeJava(comment).getBytes(StandardCharsets.UTF_8));
+ }
+ out.write(Utilities.tabCode);
+ if (MapUtils.isNotEmpty(params)) {
Review comment:
I was wondering how we could handle such a redundant code structure.
I first thought of a lambda, but out.write throws a checked IOException that a
Consumer cannot propagate, so collecting the fields and looping over them is
probably the closest working equivalent, roughly:

    List<String> dcDescription = new ArrayList<>();
    dcDescription.add(connector);
    dcDescription.add(type);
    dcDescription.add(url);
    dcDescription.add(ownerName);
    dcDescription.add(ownerType == null ? null : ownerType.name());
    dcDescription.add(comment == null ? null : HiveStringUtils.escapeJava(comment));
    dcDescription.add(MapUtils.isNotEmpty(params) ? params.toString() : null);
    for (String field : dcDescription) {
      if (field != null) {
        out.write(field.getBytes(StandardCharsets.UTF_8));
      }
      out.write(Utilities.tabCode);
    }

But then your way is probably more readable.
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/drop/DropDataConnectorDesc.java
##########
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.drop;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for DROP CONNECTOR commands.
+ */
+@Explain(displayName = "Drop Connector", explainLevels = { Level.USER,
Level.DEFAULT, Level.EXTENDED })
+public class DropDataConnectorDesc implements DDLDesc, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String connectorName;
+ private final boolean ifExists;
+ private final ReplicationSpec replicationSpec;
+
+ public DropDataConnectorDesc(String connectorName, boolean ifExists,
ReplicationSpec replicationSpec) {
+ this(connectorName, ifExists, false, replicationSpec);
+ }
+
+ public DropDataConnectorDesc(String connectorName, boolean ifExists, boolean
cascade, ReplicationSpec replicationSpec) {
+ this.connectorName = connectorName;
+ this.ifExists = ifExists;
+ this.replicationSpec = replicationSpec;
+ }
+
+ @Explain(displayName = "connector", explainLevels = { Level.USER,
Level.DEFAULT, Level.EXTENDED })
+ public String getConnectorName() {
+ return connectorName;
+ }
+
+ @Explain(displayName = "if exists")
+ public boolean getIfExists() {
+ return ifExists;
+ }
+
+ public ReplicationSpec getReplicationSpec() {
Review comment:
Missing @Explain annotation
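For instance, a sketch along the lines of the other getters in this class (the display name and explain levels here are only a suggestion):

    @Explain(displayName = "replication spec", explainLevels = { Level.EXTENDED })
    public ReplicationSpec getReplicationSpec() {
      return replicationSpec;
    }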
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/show/ShowDataConnectorsOperation.java
##########
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.show;
+
+import java.io.DataOutputStream;
+import java.util.List;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.ShowUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process of showing data connectors.
+ */
+public class ShowDataConnectorsOperation extends
DDLOperation<ShowDataConnectorsDesc> {
+ public ShowDataConnectorsOperation(DDLOperationContext context,
ShowDataConnectorsDesc desc) {
+ super(context, desc);
+ }
+
+ @Override
+ public int execute() throws HiveException {
+ List<String> connectors = context.getDb().getAllDataConnectors();
+ if (desc.getPattern() != null) {
+ LOG.debug("pattern: {}", desc.getPattern());
+ Pattern pattern =
Pattern.compile(UDFLike.likePatternToRegExp(desc.getPattern()),
Pattern.CASE_INSENSITIVE);
+ connectors = connectors.stream().filter(name ->
pattern.matcher(name).matches()).collect(Collectors.toList());
+ }
+
+ LOG.info("Found {} connector(s) matching the SHOW CONNECTORS statement.",
connectors.size());
Review comment:
I did not find logging in the files I reviewed so far. I will leave it
to you to check if logging is required in the other modified files too.
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/JDBCConnectorProviderFactory.java
##########
@@ -0,0 +1,45 @@
+package org.apache.hadoop.hive.metastore.dataconnector;
+
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import
org.apache.hadoop.hive.metastore.dataconnector.jdbc.DerbySQLConnectorProvider;
+import
org.apache.hadoop.hive.metastore.dataconnector.jdbc.MySQLConnectorProvider;
+import
org.apache.hadoop.hive.metastore.dataconnector.jdbc.PostgreSQLConnectorProvider;
+
+import static
org.apache.hadoop.hive.metastore.dataconnector.IDataConnectorProvider.*;
+
+public class JDBCConnectorProviderFactory {
+
+ public static IDataConnectorProvider get(String dbName, DataConnector
connector) {
+ IDataConnectorProvider provider = null;
+ switch(connector.getType().toLowerCase()) {
+ case MYSQL_TYPE:
+ provider = new MySQLConnectorProvider(dbName, connector);
+ /*
Review comment:
Please consider removing the commented code here and in other places.
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/show/ShowDataConnectorsOperation.java
##########
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.show;
+
+import java.io.DataOutputStream;
+import java.util.List;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.ShowUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process of showing data connectors.
+ */
+public class ShowDataConnectorsOperation extends
DDLOperation<ShowDataConnectorsDesc> {
+ public ShowDataConnectorsOperation(DDLOperationContext context,
ShowDataConnectorsDesc desc) {
+ super(context, desc);
+ }
+
+ @Override
+ public int execute() throws HiveException {
+ List<String> connectors = context.getDb().getAllDataConnectors();
+ if (desc.getPattern() != null) {
+ LOG.debug("pattern: {}", desc.getPattern());
+ Pattern pattern =
Pattern.compile(UDFLike.likePatternToRegExp(desc.getPattern()),
Pattern.CASE_INSENSITIVE);
+ connectors = connectors.stream().filter(name ->
pattern.matcher(name).matches()).collect(Collectors.toList());
Review comment:
If connectors were null here, this would result in a NullPointerException.
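A simple guard would avoid that, for example (just a sketch, assuming getAllDataConnectors() can in fact return null; java.util.Collections would need to be imported):

    List<String> connectors = context.getDb().getAllDataConnectors();
    if (connectors == null) {
      // fall back to an empty list so the stream/filter below is safe
      connectors = Collections.emptyList();
    }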
##########
File path:
ql/src/test/results/clientpositive/llap/alter_change_db_location.q.out
##########
@@ -11,7 +11,7 @@ PREHOOK: Input: database:newdb
POSTHOOK: query: describe database extended newDB
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:newdb
-newdb location/in/test hive_test_user USER
+newdb location/in/test hive_test_user USER
Review comment:
This looks like a whitespace-only change. But I guess this file was generated
automatically and you can't do anything about it, right?
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/ddl/dataconnector/show/ShowDataConnectorsOperation.java
##########
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.dataconnector.show;
+
+import java.io.DataOutputStream;
+import java.util.List;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.ShowUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process of showing data connectors.
+ */
+public class ShowDataConnectorsOperation extends
DDLOperation<ShowDataConnectorsDesc> {
+ public ShowDataConnectorsOperation(DDLOperationContext context,
ShowDataConnectorsDesc desc) {
+ super(context, desc);
+ }
+
+ @Override
+ public int execute() throws HiveException {
+ List<String> connectors = context.getDb().getAllDataConnectors();
Review comment:
Do we need to check whether the connectors list returned here is null?
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -1160,6 +1163,38 @@ private static void
updateTableAggregatePartitionColStats(RawStore rawStore, Str
return sharedCache.listCachedDatabases(catName);
}
+ @Override public void createDataConnector(DataConnector connector) throws
InvalidObjectException, MetaException {
+ rawStore.createDataConnector(connector);
+ // in case of event based cache update, cache will be updated during
commit.
+ /*
Review comment:
Please consider removing commented code here and in other places.
##########
File path: ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
##########
@@ -1719,6 +1720,23 @@ protected Database getDatabase(String dbName, boolean
throwException) throws Sem
return database;
}
+ protected DataConnector getDataConnector(String dbName) throws
SemanticException {
+ return getDataConnector(dbName, true);
+ }
+
+ protected DataConnector getDataConnector(String dcName, boolean
throwException) throws SemanticException {
+ DataConnector connector;
+ try {
+ connector = db.getDataConnector(dcName);
+ } catch (Exception e) {
+ throw new SemanticException(e.getMessage(), e);
Review comment:
Can't the exception just be wrapped inside the SemanticException? Do we need to
pass e.getMessage() explicitly?
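If SemanticException exposes a constructor that takes a Throwable (it extends HiveException, which appears to), a sketch could simply be:

    } catch (Exception e) {
      // the cause carries its own message and stack trace
      throw new SemanticException(e);
    }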
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on
hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX +
".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX +
".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX +
".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX +
".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX +
".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX +
".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX +
".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX +
".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX +
".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX +
".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX +
".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX +
".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not connect to remote data source at " + jdbcUrl);
+ throw new ConnectException("Could not connect to remote datasource at "
+ jdbcUrl + ",cause:" + sqle.getMessage());
+ }
+ }
+
+ protected Connection getConnection() {
+ try {
+ if (!isOpen)
+ open();
+ } catch (ConnectException ce) {
+ throw new RuntimeException(ce.getMessage());
+ }
+
+ if (handle instanceof Connection)
+ return (Connection)handle;
+
+ throw new RuntimeException("unexpected type for connection handle");
+ }
+
+ @Override public void close() {
+ if (isOpen) {
+ try {
+ ((Connection)handle).close();
+ } catch (SQLException sqle) {
+ LOG.warn("Could not close jdbc connection to " + jdbcUrl, sqle);
+ }
+ }
+ }
+
+ /**
+ * Returns Hive Table objects from the remote database for tables that match
a name pattern.
+ * @return List A collection of objects that match the name pattern, null
otherwise.
+ * @throws MetaException To indicate any failures with executing this API
+ * @param regex
+ */
+ @Override public abstract List<Table> getTables(String regex) throws
MetaException;
+
+ /**
+ * Returns a list of all table names from the remote database.
+ * @return List A collection of all the table names, null if there are no
tables.
+ * @throws MetaException To indicate any failures with executing this API
+ */
+ @Override public List<String> getTableNames() throws MetaException {
+ ResultSet rs = null;
+ try {
+ rs = getConnection().getMetaData().getTables(scoped_db, null, null, new
String[] { "TABLE" });
+ if (rs != null) {
+ List<String> tables = new ArrayList<String>();
+ while(rs.next()) {
+ tables.add(rs.getString(3));
+ }
+ return tables;
+ }
+ } catch (SQLException sqle) {
+ LOG.warn("Could not retrieve table names from remote datasource, cause:"
+ sqle.getMessage());
Review comment:
Don't we want to propagate the SQLException forward here? We could wrap it in
a MetaException and let the higher layers handle it.
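One possible sketch: MetaException is Thrift-generated and only takes a message, so the original exception would be attached via initCause:

    } catch (SQLException sqle) {
      LOG.warn("Could not retrieve table names from remote datasource", sqle);
      MetaException me = new MetaException("Could not retrieve table names from remote datasource: " + sqle.getMessage());
      me.initCause(sqle);
      throw me;
    }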
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/DerbySQLConnectorProvider.java
##########
@@ -0,0 +1,86 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+
+public class DerbySQLConnectorProvider extends AbstractJDBCConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(DerbySQLConnectorProvider.class);
+
+ // private static final String DRIVER_CLASS =
"org.apache.derby.jdbc.EmbeddedDriver".intern();
+ private static final String DRIVER_CLASS =
"org.apache.derby.jdbc.AutoloadedDriver".intern();
+
+ public DerbySQLConnectorProvider(String dbName, DataConnector connector) {
+ super(dbName, connector);
+ driverClassName = DRIVER_CLASS;
+ }
+
+ /**
+ * Returns a list of all table names from the remote database.
+ * @return List A collection of all the table names, null if there are no
tables.
+ * @throws IOException To indicate any failures with executing this API
+ */
+ @Override
+ protected ResultSet fetchTableNames() throws MetaException {
+ ResultSet rs = null;
+ try {
+ rs = getConnection().getMetaData().getTables(scoped_db, null, null, new
String[] { "TABLE" });
+ } catch (SQLException sqle) {
+ LOG.warn("Could not retrieve table names from remote datasource, cause:"
+ sqle.getMessage());
+ throw new MetaException("Could not retrieve table names from remote
datasource, cause:" + sqle.getMessage());
Review comment:
Please consider wrapping sqle in the MetaException while throwing.
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on
hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX +
".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX +
".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX +
".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX +
".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX +
".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX +
".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX +
".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX +
".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX +
".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX +
".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX +
".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX +
".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not connect to remote data source at " + jdbcUrl);
+ throw new ConnectException("Could not connect to remote datasource at "
+ jdbcUrl + ",cause:" + sqle.getMessage());
+ }
+ }
+
+ protected Connection getConnection() {
+ try {
+ if (!isOpen)
+ open();
+ } catch (ConnectException ce) {
+ throw new RuntimeException(ce.getMessage());
Review comment:
Can we wrap the ConnectException to retain the exception history?
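A possible one-liner for that (just a sketch), passing the original exception as the cause:

    throw new RuntimeException(ce.getMessage(), ce);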
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on
hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX +
".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX +
".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX +
".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX +
".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX +
".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX +
".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX +
".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX +
".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX +
".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX +
".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX +
".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX +
".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS =
"org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not connect to remote data source at " + jdbcUrl);
+ throw new ConnectException("Could not connect to remote datasource at "
+ jdbcUrl + ",cause:" + sqle.getMessage());
+ }
+ }
+
+ protected Connection getConnection() {
+ try {
+ if (!isOpen)
+ open();
+ } catch (ConnectException ce) {
+ throw new RuntimeException(ce.getMessage());
+ }
+
+ if (handle instanceof Connection)
+ return (Connection)handle;
+
+ throw new RuntimeException("unexpected type for connection handle");
+ }
+
+ @Override public void close() {
+ if (isOpen) {
+ try {
+ ((Connection)handle).close();
+ } catch (SQLException sqle) {
+ LOG.warn("Could not close jdbc connection to " + jdbcUrl, sqle);
Review comment:
Should we throw the SQLException forward and let it be handled in the
upper layers?
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
##########
@@ -0,0 +1,311 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import
org.apache.hadoop.hive.metastore.dataconnector.AbstractDataConnectorProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractJDBCConnectorProvider extends
AbstractDataConnectorProvider {
+ private static Logger LOG =
LoggerFactory.getLogger(AbstractJDBCConnectorProvider.class);
+ protected static Warehouse warehouse = null;
+
+ // duplicate constants from Constants.java to avoid a dependency on hive-common
+ public static final String JDBC_HIVE_STORAGE_HANDLER_ID =
+ "org.apache.hive.storage.jdbc.JdbcStorageHandler";
+ public static final String JDBC_CONFIG_PREFIX = "hive.sql";
+ public static final String JDBC_CATALOG = JDBC_CONFIG_PREFIX + ".catalog";
+ public static final String JDBC_SCHEMA = JDBC_CONFIG_PREFIX + ".schema";
+ public static final String JDBC_TABLE = JDBC_CONFIG_PREFIX + ".table";
+ public static final String JDBC_DATABASE_TYPE = JDBC_CONFIG_PREFIX + ".database.type";
+ public static final String JDBC_URL = JDBC_CONFIG_PREFIX + ".jdbc.url";
+ public static final String JDBC_DRIVER = JDBC_CONFIG_PREFIX + ".jdbc.driver";
+ public static final String JDBC_USERNAME = JDBC_CONFIG_PREFIX + ".dbcp.username";
+ public static final String JDBC_PASSWORD = JDBC_CONFIG_PREFIX + ".dbcp.password";
+ public static final String JDBC_KEYSTORE = JDBC_CONFIG_PREFIX + ".dbcp.password.keystore";
+ public static final String JDBC_KEY = JDBC_CONFIG_PREFIX + ".dbcp.password.key";
+ public static final String JDBC_QUERY = JDBC_CONFIG_PREFIX + ".query";
+ public static final String JDBC_QUERY_FIELD_NAMES = JDBC_CONFIG_PREFIX + ".query.fieldNames";
+ public static final String JDBC_QUERY_FIELD_TYPES = JDBC_CONFIG_PREFIX + ".query.fieldTypes";
+ public static final String JDBC_SPLIT_QUERY = JDBC_CONFIG_PREFIX + ".query.split";
+ public static final String JDBC_PARTITION_COLUMN = JDBC_CONFIG_PREFIX + ".partitionColumn";
+ public static final String JDBC_NUM_PARTITIONS = JDBC_CONFIG_PREFIX + ".numPartitions";
+ public static final String JDBC_LOW_BOUND = JDBC_CONFIG_PREFIX + ".lowerBound";
+ public static final String JDBC_UPPER_BOUND = JDBC_CONFIG_PREFIX + ".upperBound";
+
+ private static final String JDBC_INPUTFORMAT_CLASS = "org.apache.hive.storage.jdbc.JdbcInputFormat".intern();
+ private static final String JDBC_OUTPUTFORMAT_CLASS = "org.apache.hive.storage.jdbc.JdbcOutputFormat".intern();
+
+ String type = null; // MYSQL, POSTGRES, ORACLE, DERBY, MSSQL, DB2 etc.
+ String driverClassName = null;
+ String jdbcUrl = null;
+ String username = null;
+ String password = null; // TODO convert to byte array
+
+ public AbstractJDBCConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ this.type = connector.getType().toUpperCase(); // TODO
+ this.jdbcUrl = connector.getUrl();
+ this.username = connector.getParameters().get(JDBC_USERNAME);
+ this.password = connector.getParameters().get(JDBC_PASSWORD);
+ if (this.password == null) {
+ String keystore = connector.getParameters().get(JDBC_KEYSTORE);
+ String key = connector.getParameters().get(JDBC_KEY);
+ try {
+ char[] keyValue = MetastoreConf.getValueFromKeystore(keystore, key);
+ if (keyValue != null)
+ this.password = new String(keyValue);
+ } catch (IOException i) {
+ LOG.warn("Could not read key value from keystore");
+ }
+ }
+
+ try {
+ warehouse = new Warehouse(MetastoreConf.newMetastoreConf());
+ } catch (MetaException e) { /* ignore */ }
+ }
+
+ @Override public void open() throws ConnectException {
+ try {
+ Class.forName(driverClassName);
+ handle = DriverManager.getConnection(jdbcUrl, username, password);
+ isOpen = true;
+ } catch (ClassNotFoundException cnfe) {
+ LOG.warn("Driver class not found in classpath:" + driverClassName);
+ throw new RuntimeException("Driver class not found:" + driverClassName);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not connect to remote data source at " + jdbcUrl);
+ throw new ConnectException("Could not connect to remote datasource at " + jdbcUrl + ",cause:" + sqle.getMessage());
+ }
+ }
+
+ protected Connection getConnection() {
+ try {
+ if (!isOpen)
+ open();
+ } catch (ConnectException ce) {
+ throw new RuntimeException(ce.getMessage());
+ }
+
+ if (handle instanceof Connection)
+ return (Connection)handle;
+
+ throw new RuntimeException("unexpected type for connection handle");
+ }
+
+ @Override public void close() {
+ if (isOpen) {
+ try {
+ ((Connection)handle).close();
+ } catch (SQLException sqle) {
+ LOG.warn("Could not close jdbc connection to " + jdbcUrl, sqle);
+ }
+ }
+ }
+
+ /**
+ * Returns Hive Table objects from the remote database for tables that match a name pattern.
+ * @return List A collection of objects that match the name pattern, null otherwise.
+ * @throws MetaException To indicate any failures with executing this API
+ * @param regex
+ */
+ @Override public abstract List<Table> getTables(String regex) throws MetaException;
+
+ /**
+ * Returns a list of all table names from the remote database.
+ * @return List A collection of all the table names, null if there are no tables.
+ * @throws MetaException To indicate any failures with executing this API
+ */
+ @Override public List<String> getTableNames() throws MetaException {
+ ResultSet rs = null;
+ try {
+ rs = getConnection().getMetaData().getTables(scoped_db, null, null, new String[] { "TABLE" });
+ if (rs != null) {
+ List<String> tables = new ArrayList<String>();
+ while(rs.next()) {
+ tables.add(rs.getString(3));
+ }
+ return tables;
+ }
+ } catch (SQLException sqle) {
+ LOG.warn("Could not retrieve table names from remote datasource, cause:"
+ sqle.getMessage());
+ } finally {
+ try {
+ if (rs != null) {
+ rs.close();
+ rs = null;
+ }
+ } catch(Exception e) { /* ignore */}
+ }
+ return null;
+ }
+
+ protected abstract ResultSet fetchTableMetadata(String tableName) throws MetaException;
+
+ protected abstract ResultSet fetchTableNames() throws MetaException;
+
+ /**
+ * Fetch a single table with the given name, returns a Hive Table object from the remote database
+ * @return Table A Table object for the matching table, null otherwise.
+ * @throws MetaException To indicate any failures with executing this API
+ * @param tableName
+ */
+ @Override public Table getTable(String tableName) throws MetaException {
+ ResultSet rs = null;
+ Table table = null;
+ try {
+ // rs = fetchTableMetadata(tableName);
+ rs = fetchTableViaDBMetaData(tableName);
+ List<FieldSchema> cols = new ArrayList<>();
+ while (rs.next()) {
+ FieldSchema fs = new FieldSchema();
+ fs.setName(rs.getString("COLUMN_NAME"));
+ fs.setType(getDataType(rs.getString("TYPE_NAME"), rs.getInt("COLUMN_SIZE")));
+ fs.setComment("inferred column type");
+ cols.add(fs);
+ }
+
+ if (cols.size() == 0) {
+ // table does not exists or could not be fetched
+ return null;
+ }
+
+ table = buildTableFromColsList(tableName, cols);
+ //Setting the table properties.
+ table.getParameters().put(JDBC_DATABASE_TYPE, this.type);
+ table.getParameters().put(JDBC_DRIVER, this.driverClassName);
+ table.getParameters().put(JDBC_TABLE, tableName);
+ table.getParameters().put(JDBC_URL, this.jdbcUrl);
+ table.getParameters().put(hive_metastoreConstants.META_TABLE_STORAGE, JDBC_HIVE_STORAGE_HANDLER_ID);
+ table.getParameters().put("EXTERNAL", "TRUE");
+ Map<String, String> connectorParams = connector.getParameters();
+ for (String param: connectorParams.keySet()) {
+ if (param.startsWith(JDBC_CONFIG_PREFIX)) {
+ table.getParameters().put(param, connectorParams.get(param));
+ }
+ }
+ return table;
+ } catch (Exception e) {
+ LOG.warn("Exception retrieving remote table " + scoped_db + "." +
tableName + " via data connector "
+ + connector.getName());
+ throw new MetaException("Error retrieving remote table:" + e);
+ } finally {
+ try {
+ if (rs != null) {
+ rs.close();
+ }
+ } catch (Exception ex) { /* ignore */ }
+ }
+ }
+
+ private ResultSet fetchTableViaDBMetaData(String tableName) {
+ ResultSet rs = null;
+ try {
+ rs = getConnection().getMetaData().getColumns(scoped_db, null, tableName, null);
+ } catch (SQLException sqle) {
+ LOG.warn("Could not retrieve column names from JDBC table, cause:" +
sqle.getMessage());
Review comment:
Should we not throw the exception forward instead of returning a null
result set? It would be difficult to distinguish the absence of the table from
a JDBC access exception, right?
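For illustration only, a sketch of the helper rethrowing instead of swallowing the error. It assumes the private method's signature can be changed to declare MetaException; since the Thrift-generated MetaException only takes a message in its constructor, the SQLException is attached via initCause:

    private ResultSet fetchTableViaDBMetaData(String tableName) throws MetaException {
      try {
        // an empty result set here genuinely means "no such table"
        return getConnection().getMetaData().getColumns(scoped_db, null, tableName, null);
      } catch (SQLException sqle) {
        LOG.warn("Could not retrieve column names from JDBC table " + tableName, sqle);
        MetaException me = new MetaException("Could not retrieve columns for " + scoped_db + "."
            + tableName + ", cause: " + sqle.getMessage());
        me.initCause(sqle); // keep the JDBC failure distinguishable from a missing table
        throw me;
      }
    }

Since getTable() already catches Exception and rewraps it in a MetaException, its callers would not need further changes.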
##########
File path: standalone-metastore/metastore-server/src/main/resources/package.jdo
##########
@@ -1601,6 +1609,41 @@
<column name="DB_ID"/>
</index>
</class>
+ <class name="MDataConnector" table="DATACONNECTORS"
identity-type="application" detachable="true">
+ <field name="name" primary-key="true">
+ <column name="NAME" length="128" jdbc-type="VARCHAR"
allows-null="false"/>
+ </field>
+ <field name="type">
+ <column name="TYPE" length="128" jdbc-type="VARCHAR"
allows-null="false"/>
+ </field>
+ <field name="url">
+ <column name="URL" length="4000" jdbc-type="VARCHAR"
allows-null="false"/>
+ </field>
+ <field name="description">
+ <column name="COMMENT" length="256" jdbc-type="VARCHAR"/>
+ </field>
+ <field name="ownerName">
+ <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"
allows-null="true"/>
+ </field>
+ <field name="ownerType">
+ <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR"
allows-null="true"/>
+ </field>
+ <field name="createTime">
+ <column name="CREATE_TIME" jdbc-type="integer" allows-null="false"/>
+ </field>
+ <field name="parameters" table="DATACONNECTOR_PARAMS">
+ <map key-type="java.lang.String" value-type="java.lang.String"/>
+ <join>
+ <column name="NAME"/>
+ </join>
+ <key>
+ <column name="PARAM_KEY" length="180" jdbc-type="VARCHAR"/>
+ </key>
+ <value>
+ <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
Review comment:
Just confirming: should this be 32762?
##########
File path:
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java
##########
@@ -0,0 +1,87 @@
+package org.apache.hadoop.hive.metastore.dataconnector.jdbc;
+
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.api.DataConnector;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+
+public class MySQLConnectorProvider extends AbstractJDBCConnectorProvider {
+ private static Logger LOG = LoggerFactory.getLogger(MySQLConnectorProvider.class);
+
+ private static final String DRIVER_CLASS = "com.mysql.jdbc.Driver".intern();
+
+ public MySQLConnectorProvider(String dbName, DataConnector dataConn) {
+ super(dbName, dataConn);
+ driverClassName = DRIVER_CLASS;
+ }
+
+ /**
+ * Returns a list of all table names from the remote database.
+ * @return List A collection of all the table names, null if there are no tables.
+ * @throws MetaException To indicate any failures with executing this API
+ */
+ @Override protected ResultSet fetchTableNames() throws MetaException {
+ ResultSet rs = null;
+ try {
+ rs = getConnection().getMetaData().getTables(scoped_db, null, null, new String[] { "TABLE" });
+ } catch (SQLException sqle) {
+ LOG.warn("Could not retrieve table names from remote datasource, cause:"
+ sqle.getMessage());
+ throw new MetaException("Could not retrieve table names from remote
datasource, cause:" + sqle.getMessage());
Review comment:
Please consider wrapping sqle in the MetaException and throwing it.
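Something along these lines, as a sketch. MetaException does not accept a cause in its constructor, so the SQLException could be attached with initCause so the original stack trace survives into the upper layers:

    } catch (SQLException sqle) {
      LOG.warn("Could not retrieve table names from remote datasource", sqle);
      MetaException me = new MetaException(
          "Could not retrieve table names from remote datasource, cause: " + sqle.getMessage());
      me.initCause(sqle); // preserve the underlying SQLException for callers
      throw me;
    }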
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]