kgyrtkirk commented on a change in pull request #1633:
URL: https://github.com/apache/hive/pull/1633#discussion_r527557968
##########
File path: hplsql/pom.xml
##########
@@ -46,6 +46,11 @@
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-standalone-metastore-common</artifactId>
Review comment:
I think hive-exec already contains this transitively - is this dependency really necessary?
##########
File path: beeline/src/java/org/apache/hive/beeline/BeeLine.java
##########
@@ -892,8 +893,12 @@ private boolean connectUsingArgs(BeelineParser beelineParser, CommandLine cl) {
getOpts().setInitFiles(cl.getOptionValues("i"));
getOpts().setScriptFile(cl.getOptionValue("f"));
-
if (url != null) {
+ String hplSqlMode = Utils.parsePropertyFromUrl(url, Constants.HPLSQL_MODE);
Review comment:
I think it would be better if this were `mode=hplsql`; that way we could add more "mode"-s later.
What do you think?
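Roughly what I have in mind (just a sketch; the literal `"mode"` key and the check below are illustrative, not code from this PR):
```java
// sketch: parse a generic "mode" URL property instead of a dedicated hplsql flag,
// so additional execution modes can be introduced later without new URL keys
String mode = Utils.parsePropertyFromUrl(url, "mode"); // e.g. jdbc:hive2://host:10000/db;mode=hplsql
if ("hplsql".equalsIgnoreCase(mode)) {
  // switch BeeLine / the connection into HPL/SQL mode
}
```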
##########
File path: hplsql/src/main/java/org/apache/hive/hplsql/Copy.java
##########
@@ -233,16 +219,16 @@ void copyToFile(HplsqlParser.Copy_stmtContext ctx, Query query) throws Exception
sql = "INSERT INTO " + sqlInsertName + " VALUES (";
rowdel = ");\n".getBytes();
}
- while (rs.next()) {
+ while (query.next()) {
if (sqlInsert) {
out.write(sql.getBytes());
}
- for (int i = 1; i <= cols; i++) {
- if (i > 1) {
+ for (int i = 0; i < cols; i++) {
Review comment:
(QR-indexes#2) and there are places where it changes to 0-indexed
##########
File path: service/src/java/org/apache/hive/service/cli/operation/Operation.java
##########
@@ -88,8 +89,14 @@ protected Operation(HiveSession parentSession, OperationType opType) {
}
protected Operation(HiveSession parentSession,
- Map<String, String> confOverlay, OperationType opType) {
+ Map<String, String> confOverlay, OperationType opType) {
+ this(parentSession, confOverlay, opType, false);
+ }
+
+ protected Operation(HiveSession parentSession,
+ Map<String, String> confOverlay, OperationType opType, boolean embedded) {
Review comment:
I don't see this or SqlOperation called with a `true` embedded parameter.
This is one way to create a multi-statement query; I wonder if we could have that as a first-class citizen, and implement the old one as a single-instruction operation. What do you think?
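Very roughly what I'm picturing (a purely hypothetical sketch; none of these names exist in the PR):
```java
// hypothetical: a multi-statement ("script") execution contract as the first-class citizen
interface ScriptExecution {
  // run a sequence of statements under a single operation handle / query id
  void runStatements(java.util.List<String> statements) throws Exception;
}
// today's single-statement operation would then just be the one-element special case
```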
##########
File path: hplsql/src/main/java/org/apache/hive/hplsql/Var.java
##########
@@ -258,44 +256,35 @@ public void setValue(Object value) {
this.value = value;
}
}
-
- /**
- * Set the new value from the result set
- */
- public Var setValue(ResultSet rs, ResultSetMetaData rsm, int idx) throws SQLException {
- int type = rsm.getColumnType(idx);
+
+ public Var setValue(QueryResult queryResult, int idx) {
+ int type = queryResult.jdbcType(idx);
if (type == java.sql.Types.CHAR || type == java.sql.Types.VARCHAR) {
- cast(new Var(rs.getString(idx)));
- }
- else if (type == java.sql.Types.INTEGER || type == java.sql.Types.BIGINT ||
- type == java.sql.Types.SMALLINT || type == java.sql.Types.TINYINT) {
- cast(new Var(Long.valueOf(rs.getLong(idx))));
- }
- else if (type == java.sql.Types.DECIMAL || type == java.sql.Types.NUMERIC) {
- cast(new Var(rs.getBigDecimal(idx)));
- }
- else if (type == java.sql.Types.FLOAT || type == java.sql.Types.DOUBLE) {
- cast(new Var(Double.valueOf(rs.getDouble(idx))));
+ cast(new Var(queryResult.column(idx, String.class)));
+ } else if (type == java.sql.Types.INTEGER || type == java.sql.Types.BIGINT ||
+ type == java.sql.Types.SMALLINT || type == java.sql.Types.TINYINT) {
+ cast(new Var(Long.valueOf(queryResult.column(idx, Long.class))));
+ } else if (type == java.sql.Types.DECIMAL || type == java.sql.Types.NUMERIC) {
+ cast(new Var(queryResult.column(idx, BigDecimal.class)));
+ } else if (type == java.sql.Types.FLOAT || type == java.sql.Types.DOUBLE) {
+ cast(new Var(Double.valueOf(queryResult.column(idx, Double.class))));
}
return this;
}
-
- /**
- * Set ROW values from the result set
- */
- public Var setValues(ResultSet rs, ResultSetMetaData rsm) throws SQLException {
+
+ public Var setValues(QueryResult queryResult) {
Row row = (Row)this.value;
- int idx = 1;
+ int idx = 0;
Review comment:
(QR-indexes#2) and there are places where it changes to 0-indexed.
JDBC drivers usually start numbering from 1, but internal code tends to use 0.
It would be helpful to write this contract down in QueryResult's apidoc.
Will the "old" approach still work after these changes?
##########
File path: hplsql/src/main/java/org/apache/hive/hplsql/executor/JdbcQueryExecutor.java
##########
@@ -0,0 +1,101 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hive.hplsql.executor;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.apache.hive.hplsql.Exec;
+import org.apache.hive.hplsql.Query;
+
+public class JdbcQueryExecutor implements QueryExecutor {
+ private final Exec exec;
+
+ public JdbcQueryExecutor(Exec exec) {
+ this.exec = exec;
+ }
+
+ @Override
+ public QueryResult executeQuery(String sql, ParserRuleContext ctx) {
+ String conn = exec.getStatementConnection();
+ Query query = exec.executeQuery(ctx, new Query(sql), conn);
+ ResultSet resultSet = query.getResultSet();
+ if (resultSet == null) { // offline mode
+ return new QueryResult(null, () -> new Metadata(Collections.emptyList()), query.getException());
+ } else {
+ return new QueryResult(new JdbcRowResult(resultSet), () -> metadata(resultSet), query.getException());
+ }
+ }
+
+ private static Metadata metadata(ResultSet resultSet) {
+ try {
+ ResultSetMetaData meta = resultSet.getMetaData();
+ List<ColumnMeta> colMetas = new ArrayList<>();
+ for (int i = 1; i <= meta.getColumnCount(); i++) {
+ colMetas.add(new ColumnMeta(
+ meta.getColumnName(i), meta.getColumnTypeName(i), meta.getColumnType(i)));
+ }
+ return new Metadata(colMetas);
+ } catch (SQLException e) {
+ throw new QueryException(e);
+ }
+ }
+
+ private static class JdbcRowResult implements RowResult {
+ private final ResultSet resultSet;
+
+ private JdbcRowResult(ResultSet resultSet) {
+ this.resultSet = resultSet;
+ }
+
+ @Override
+ public boolean next() {
+ try {
+ return resultSet.next();
+ } catch (SQLException e) {
+ throw new QueryException(e);
+ }
+ }
+
+ @Override
+ public <T> T get(int columnIndex, Class<T> type) {
+ try {
+ return resultSet.getObject(columnIndex, type);
Review comment:
(QR-indexes#4) given the things I've seen so far:
I think right now QueryResult is 0-indexed, and since this seems to be the JDBC adaptor, I would have expected a +/- 1 here.
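Roughly what I would have expected (a sketch, assuming the 0-based contract from QR-indexes#2 holds):
```java
@Override
public <T> T get(int columnIndex, Class<T> type) {
  try {
    // QueryResult indexes are 0-based; JDBC ResultSet columns are 1-based
    return resultSet.getObject(columnIndex + 1, type);
  } catch (SQLException e) {
    throw new QueryException(e);
  }
}
```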
##########
File path: hplsql/src/main/java/org/apache/hive/hplsql/Cmp.java
##########
@@ -138,28 +147,26 @@ else if (query2.error()) {
exec.signal(query2);
return null;
}
- ResultSet rs1 = query1.getResultSet();
- ResultSet rs2 = query2.getResultSet();
- if (rs1 == null || rs2 == null) {
+ if (query1 == null || query2 == null) {
exec.setSqlCode(-1);
return null;
}
boolean equal = true;
tests = 0;
failedTests = 0;
try {
- ResultSetMetaData rm1 = rs1.getMetaData();
- ResultSetMetaData rm2 = rs2.getMetaData();
- int cnt1 = rm1.getColumnCount();
- int cnt2 = rm2.getColumnCount();
+ Metadata rm1 = query1.metadata();
+ Metadata rm2 = query2.metadata();
+ int cnt1 = rm1.columnCount();
+ int cnt2 = rm2.columnCount();
tests = cnt1;
- while (rs1.next() && rs2.next()) {
+ while (query1.next() && query2.next()) {
for (int i = 1; i <= tests; i++) {
Review comment:
(QR-indexes#1) there seem to be existing loops still starting from index 1
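If QueryResult ends up 0-based, I'd expect this loop to follow suit, roughly (just a sketch of the loop header, body unchanged):
```java
// assuming QueryResult columns are 0-based (see QR-indexes#2)
for (int i = 0; i < tests; i++) {
  // ... compare column i of query1 and query2 as before ...
}
```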
##########
File path: jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
##########
@@ -18,18 +18,67 @@
package org.apache.hive.jdbc;
-import com.google.common.annotations.VisibleForTesting;
+import static org.apache.hadoop.hive.conf.Constants.HPLSQL_MODE;
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.File;
Review comment:
could you configure import order in your ide to not reorder imports
"this much"?
##########
File path: service/src/java/org/apache/hive/service/cli/operation/Operation.java
##########
@@ -102,7 +109,7 @@ protected Operation(HiveSession parentSession,
MetricsConstant.COMPLETED_OPERATION_PREFIX, state);
queryState = new QueryState.Builder()
.withConfOverlay(confOverlay)
- .withGenerateNewQueryId(true)
+ .withGenerateNewQueryId(!embedded)
Review comment:
what does embedded mean here?
* embedded hiveserver2
* some hplsql related stuff?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]