This is an automated email from the ASF dual-hosted git repository.
dzamo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git
The following commit(s) were added to refs/heads/master by this push:
new c093851 DRILL-7994: Dependency version updates for severe
vulnerabilities (#2432)
c093851 is described below
commit c093851ea075225cfe84a169ef470bb0da3f931f
Author: James Turton <[email protected]>
AuthorDate: Wed Feb 2 11:18:22 2022 +0200
DRILL-7994: Dependency version updates for severe vulnerabilities (#2432)
* Severity 8+ vulnerability dependency updates.
* Adjust netty-tcnative version for windows-x86_64.
* Bump Avro because of severe Velocity vulns.
* Adjust expected row count to accommodate t-digest 3.3 improvements.
"The meaning of the compression parameter became a bit more strict in this
latest version. As such, the digest should be a bit smaller and tails
should be a bit more accurate." -- Ted Dunning
* Code comment whitespace formatting.
* Remove cruft from jdbc-all jar.
* Return needed Netty jars to jdbc-all.
* Adjust unit test memory allocs for TestAggregateFunctions.
* Remove references to unused artifact jersey-guava.
* Netty tcnative to 2.0.48.
* +200MiB direct memory for the unit test run.
* Upgrade jetty-server and h2 for dependabot.
* Fixes for H2 lib upgrade.
* Work around DOUBLE type DDL syntax error for pg dialect.
* Increase CI heap mem allocation for the sake of JDK 8.
* Specify parallel GC for CI to try to minimise peak mem usage.
* Cast Netty ByteBuffer to Buffer for JDK 8 compat.
* Increase CI heap mem allocation for the sake of TestAggregateFunctions.
---
.github/workflows/ci.yml | 5 +-
.../exec/store/http/HttpScanBatchCreator.java | 4 +-
contrib/storage-jdbc/pom.xml | 2 +-
.../exec/store/jdbc/CapitalizingJdbcSchema.java | 6 +-
.../drill/exec/store/jdbc/JdbcRecordWriter.java | 10 +-
...eryBuilder.java => CreateTableStmtBuilder.java} | 42 ++-
...uilder.java => TestCreateTableStmtBuilder.java} | 16 +-
.../exec/store/jdbc/TestJdbcPluginWithH2IT.java | 14 +-
.../exec/store/jdbc/TestJdbcWriterWithH2.java | 419 +++++++++++----------
.../exec/store/jdbc/TestJdbcWriterWithMySQL.java | 6 +-
.../store/jdbc/TestJdbcWriterWithPostgres.java | 4 +-
.../src/test/resources/h2-test-data.sql | 4 +-
.../resources/h2_information_schema_tables.csv | 52 +--
distribution/pom.xml | 10 +-
distribution/src/assemble/component.xml | 10 +-
distribution/src/main/resources/LICENSE | 3 +-
exec/java-exec/pom.xml | 2 +-
.../planner/sql/handlers/DropTableHandler.java | 4 +-
.../parquet/hadoop/ColumnChunkIncReadStore.java | 7 +-
.../org/apache/drill/exec/sql/TestAnalyze.java | 2 +-
.../storage/CredentialsProviderSerDeTest.java | 3 +-
exec/jdbc-all/pom.xml | 22 +-
.../src/main/java/io/netty/buffer/DrillBuf.java | 3 +-
pom.xml | 19 +-
24 files changed, 356 insertions(+), 313 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e20409e..441c2ac 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -59,7 +59,10 @@ jobs:
java-version: ${{ matrix.java }}
- name: Build and test
# The total GitHub Actions memory is 7000Mb. But GitHub CI requires
some memory for the container to perform tests
- run: mvn install --batch-mode --no-transfer-progress
-Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120 # -X -V for debugging
+ run: |
+ echo Selecting parallel GC to minimise peak mem usage.
+ MAVEN_OPTS="-XX:+UseParallelGC"
+ mvn install --batch-mode --no-transfer-progress
-Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120 # -X -V for debugging
checkstyle_protobuf:
name: Run checkstyle and generate protobufs
diff --git
a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
index fda295c..3bdee3f 100644
---
a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
+++
b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
@@ -162,8 +162,8 @@ public class HttpScanBatchCreator implements
BatchCreator<HttpSubScan> {
}
} else if (paginatorConfig != null) {
/*
- * If the paginator is not null and generated a list of URLs, we create
- * a new batch reader for each URL. In the future, this could be
parallelized in
+ * If the paginator is not null we create a new batch reader for each
+ * URL that it generates. In the future, this could be parallelized in
* the group scan such that the calls could be sent to different
drillbits.
*/
if (!paginator.hasNext()) {
diff --git a/contrib/storage-jdbc/pom.xml b/contrib/storage-jdbc/pom.xml
index 92941eb..c4bd765 100755
--- a/contrib/storage-jdbc/pom.xml
+++ b/contrib/storage-jdbc/pom.xml
@@ -33,7 +33,7 @@
<properties>
<mysql.connector.version>8.0.25</mysql.connector.version>
<clickhouse.jdbc.version>0.3.1</clickhouse.jdbc.version>
- <h2.version>1.4.200</h2.version>
+ <h2.version>2.1.210</h2.version>
<postgresql.version>42.2.24</postgresql.version>
</properties>
diff --git
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
index d072751..1d7503f 100644
---
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
+++
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
@@ -41,7 +41,7 @@ import org.apache.drill.exec.planner.logical.CreateTableEntry;
import org.apache.drill.exec.store.AbstractSchema;
import org.apache.drill.exec.store.StorageStrategy;
import org.apache.drill.exec.store.jdbc.utils.JdbcDDLQueryUtils;
-import org.apache.drill.exec.store.jdbc.utils.JdbcQueryBuilder;
+import org.apache.drill.exec.store.jdbc.utils.CreateTableStmtBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -123,7 +123,7 @@ public class CapitalizingJdbcSchema extends AbstractSchema {
@Override
public Writer getWriter(PhysicalOperator child) throws IOException {
- String tableWithSchema =
JdbcQueryBuilder.buildCompleteTableName(tableName, catalog, schema);
+ String tableWithSchema =
CreateTableStmtBuilder.buildCompleteTableName(tableName, catalog, schema);
return new JdbcWriter(child, tableWithSchema, inner, plugin);
}
@@ -143,7 +143,7 @@ public class CapitalizingJdbcSchema extends AbstractSchema {
.build(logger);
}
- String tableWithSchema =
JdbcQueryBuilder.buildCompleteTableName(tableName, catalog, schema);
+ String tableWithSchema =
CreateTableStmtBuilder.buildCompleteTableName(tableName, catalog, schema);
String dropTableQuery = String.format("DROP TABLE %s", tableWithSchema);
dropTableQuery = JdbcDDLQueryUtils.cleanDDLQuery(dropTableQuery,
plugin.getDialect());
diff --git
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordWriter.java
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordWriter.java
index afc0b5c..fe83a2e 100644
---
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordWriter.java
+++
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordWriter.java
@@ -57,7 +57,7 @@ import org.apache.drill.exec.record.VectorAccessible;
import org.apache.drill.exec.store.AbstractRecordWriter;
import org.apache.drill.exec.store.EventBasedRecordWriter.FieldConverter;
import org.apache.drill.exec.store.jdbc.utils.JdbcDDLQueryUtils;
-import org.apache.drill.exec.store.jdbc.utils.JdbcQueryBuilder;
+import org.apache.drill.exec.store.jdbc.utils.CreateTableStmtBuilder;
import org.apache.drill.exec.util.DecimalUtility;
import org.apache.drill.exec.vector.complex.reader.FieldReader;
import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
@@ -102,8 +102,8 @@ public class JdbcRecordWriter extends AbstractRecordWriter {
* mapped to VARBINARY.
*/
public static final ImmutableMap<MinorType, Integer> JDBC_TYPE_MAPPINGS =
ImmutableMap.<MinorType, Integer>builder()
- .put(MinorType.FLOAT8, java.sql.Types.NUMERIC)
- .put(MinorType.FLOAT4, java.sql.Types.NUMERIC)
+ .put(MinorType.FLOAT8, java.sql.Types.DOUBLE)
+ .put(MinorType.FLOAT4, java.sql.Types.FLOAT)
.put(MinorType.TINYINT, java.sql.Types.TINYINT)
.put(MinorType.SMALLINT, java.sql.Types.SMALLINT)
.put(MinorType.INT, java.sql.Types.INTEGER)
@@ -151,7 +151,7 @@ public class JdbcRecordWriter extends AbstractRecordWriter {
MinorType type;
String sql;
boolean nullable = false;
- JdbcQueryBuilder queryBuilder = new JdbcQueryBuilder(tableName, dialect);
+ CreateTableStmtBuilder queryBuilder = new
CreateTableStmtBuilder(tableName, dialect);
for (MaterializedField field : schema) {
columnName = JdbcDDLQueryUtils.addBackTicksToField(field.getName());
@@ -174,7 +174,7 @@ public class JdbcRecordWriter extends AbstractRecordWriter {
queryBuilder.addColumn(columnName, field.getType().getMinorType(),
nullable, precision, scale);
}
- sql = queryBuilder.getCreateTableQuery();
+ sql = queryBuilder.build().getCreateTableQuery();
sql = JdbcDDLQueryUtils.cleanDDLQuery(sql, dialect);
logger.debug("Final query: {}", sql);
diff --git
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/JdbcQueryBuilder.java
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/CreateTableStmtBuilder.java
similarity index 77%
rename from
contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/JdbcQueryBuilder.java
rename to
contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/CreateTableStmtBuilder.java
index 2d3c439..f05ded7 100644
---
a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/JdbcQueryBuilder.java
+++
b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/utils/CreateTableStmtBuilder.java
@@ -19,6 +19,7 @@
package org.apache.drill.exec.store.jdbc.utils;
import org.apache.calcite.sql.SqlDialect;
+import org.apache.calcite.sql.dialect.PostgresqlSqlDialect;
import org.apache.drill.common.exceptions.UserException;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.store.jdbc.JdbcRecordWriter;
@@ -28,22 +29,22 @@ import org.slf4j.LoggerFactory;
import java.sql.JDBCType;
-public class JdbcQueryBuilder {
- private static final Logger logger =
LoggerFactory.getLogger(JdbcQueryBuilder.class);
+public class CreateTableStmtBuilder {
+ private static final Logger logger =
LoggerFactory.getLogger(CreateTableStmtBuilder.class);
public static final int DEFAULT_VARCHAR_PRECISION = 100;
private static final String CREATE_TABLE_QUERY = "CREATE TABLE %s (";
- private final StringBuilder createTableQuery;
- private SqlDialect dialect;
+ private StringBuilder createTableQuery;
+ private final String tableName;
+ private final SqlDialect dialect;
private StringBuilder columns;
- public JdbcQueryBuilder(String tableName, SqlDialect dialect) {
+ public CreateTableStmtBuilder(String tableName, SqlDialect dialect) {
if (Strings.isNullOrEmpty(tableName)) {
throw new UnsupportedOperationException("Table name cannot be empty");
}
+ this.tableName = tableName;
this.dialect = dialect;
- createTableQuery = new StringBuilder();
- createTableQuery.append(String.format(CREATE_TABLE_QUERY, tableName));
columns = new StringBuilder();
}
@@ -57,9 +58,18 @@ public class JdbcQueryBuilder {
*/
public void addColumn(String colName, MinorType type, boolean nullable, int
precision, int scale) {
StringBuilder queryText = new StringBuilder();
- String jdbcColType = "";
+ String jdbcColTypeName = "";
try {
- jdbcColType =
JDBCType.valueOf(JdbcRecordWriter.JDBC_TYPE_MAPPINGS.get(type)).getName();
+ Integer jdbcColType = JdbcRecordWriter.JDBC_TYPE_MAPPINGS.get(type);
+ jdbcColTypeName = JDBCType.valueOf(jdbcColType).getName();
+
+ if (dialect instanceof PostgresqlSqlDialect) {
+ // pg data type name special case
+ if (jdbcColType.equals(java.sql.Types.DOUBLE)) {
+ // TODO: Calcite will incorrectly output DOUBLE instead of DOUBLE
PRECISION under the pg dialect
+ jdbcColTypeName = "FLOAT";
+ }
+ }
} catch (NullPointerException e) {
// JDBC Does not support writing complex fields to databases
throw UserException.dataWriteError()
@@ -68,10 +78,10 @@ public class JdbcQueryBuilder {
.build(logger);
}
- queryText.append(colName).append(" ").append(jdbcColType);
+ queryText.append(colName).append(" ").append(jdbcColTypeName);
// Add precision or scale if applicable
- if (jdbcColType.equals("VARCHAR")) {
+ if (jdbcColTypeName.equals("VARCHAR")) {
int max_precision = Math.max(precision, DEFAULT_VARCHAR_PRECISION);
queryText.append("(").append(max_precision).append(")");
}
@@ -91,10 +101,16 @@ public class JdbcQueryBuilder {
* Generates the CREATE TABLE query.
* @return The create table query.
*/
- public String getCreateTableQuery() {
+ public CreateTableStmtBuilder build() {
+ createTableQuery = new StringBuilder();
+ createTableQuery.append(String.format(CREATE_TABLE_QUERY, tableName));
createTableQuery.append(columns);
createTableQuery.append("\n)");
- return createTableQuery.toString();
+ return this;
+ }
+
+ public String getCreateTableQuery() {
+ return createTableQuery != null ? createTableQuery.toString() : null;
}
@Override
diff --git
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcQueryBuilder.java
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestCreateTableStmtBuilder.java
similarity index 64%
rename from
contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcQueryBuilder.java
rename to
contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestCreateTableStmtBuilder.java
index 99fc5a2..9d827a6 100644
---
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcQueryBuilder.java
+++
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestCreateTableStmtBuilder.java
@@ -18,12 +18,12 @@
package org.apache.drill.exec.store.jdbc;
-import org.apache.drill.exec.store.jdbc.utils.JdbcQueryBuilder;
+import org.apache.drill.exec.store.jdbc.utils.CreateTableStmtBuilder;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
-public class TestJdbcQueryBuilder {
+public class TestCreateTableStmtBuilder {
@Test
public void testSimpleTable() {
@@ -31,10 +31,10 @@ public class TestJdbcQueryBuilder {
String schema = "schema";
String catalog = "catalog";
- String completeTable = JdbcQueryBuilder.buildCompleteTableName(table,
catalog, schema);
+ String completeTable =
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, schema);
assertEquals("`catalog`.`schema`.`table`", completeTable);
- assertEquals("`catalog`.`table`",
JdbcQueryBuilder.buildCompleteTableName(table, catalog, ""));
- assertEquals("`catalog`.`table`",
JdbcQueryBuilder.buildCompleteTableName(table, catalog, null));
+ assertEquals("`catalog`.`table`",
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, ""));
+ assertEquals("`catalog`.`table`",
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, null));
}
@Test
@@ -43,9 +43,9 @@ public class TestJdbcQueryBuilder {
String schema = "schema with spaces";
String catalog = "catalog with spaces";
- String completeTable = JdbcQueryBuilder.buildCompleteTableName(table,
catalog, schema);
+ String completeTable =
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, schema);
assertEquals("`catalog with spaces`.`schema with spaces`.`table with
spaces`", completeTable);
- assertEquals("`catalog with spaces`.`table with spaces`",
JdbcQueryBuilder.buildCompleteTableName(table, catalog, ""));
- assertEquals("`catalog with spaces`.`table with spaces`",
JdbcQueryBuilder.buildCompleteTableName(table, catalog, null));
+ assertEquals("`catalog with spaces`.`table with spaces`",
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, ""));
+ assertEquals("`catalog with spaces`.`table with spaces`",
CreateTableStmtBuilder.buildCompleteTableName(table, catalog, null));
}
}
diff --git
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
index a84978f..4167306 100644
---
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
+++
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
@@ -34,7 +34,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.FileReader;
-import java.math.BigDecimal;
import java.net.URL;
import java.nio.file.Paths;
import java.sql.Connection;
@@ -101,7 +100,6 @@ public class TestJdbcPluginWithH2IT extends ClusterTest {
@Test
public void validateResult() throws Exception {
- // Skip date, time, and timestamp types since h2 mangles these due to
improper timezone support.
testBuilder()
.sqlQuery(
"select person_id, first_name, last_name, address, city, state,
zip, json, bigint_field, smallint_field, " +
@@ -112,18 +110,18 @@ public class TestJdbcPluginWithH2IT extends ClusterTest {
"bigint_field", "smallint_field", "numeric_field",
"boolean_field", "double_field", "float_field",
"real_field", "time_field", "timestamp_field", "date_field",
"clob_field")
.baselineValues(1, "first_name_1", "last_name_1", "1401 John F Kennedy
Blvd", "Philadelphia", "PA", 19107,
- "{ a : 5, b : 6 }", 123456L, 1, new BigDecimal("10.01"), false,
1.0, 1.1, 111.00,
+ "{ a : 5, b : 6 }", 123456L, 1, 10.01, false, 1.0, 1.1f, 111.00,
DateUtility.parseLocalTime("13:00:01.0"),
DateUtility.parseLocalDateTime("2012-02-29 13:00:01.0"),
DateUtility.parseLocalDate("2012-02-29"), "some clob data 1")
.baselineValues(2, "first_name_2", "last_name_2", "One Ferry
Building", "San Francisco", "CA", 94111,
- "{ foo : \"abc\" }", 95949L, 2, new BigDecimal("20.02"), true,
2.0, 2.1, 222.00,
+ "{ foo : \"abc\" }", 95949L, 2, 20.02, true, 2.0, 2.1f, 222.00,
DateUtility.parseLocalTime("23:59:59.0"),
DateUtility.parseLocalDateTime("1999-09-09 23:59:59.0"),
DateUtility.parseLocalDate("1999-09-09"), "some more clob data")
.baselineValues(3, "first_name_3", "last_name_3", "176 Bowery", "New
York", "NY", 10012, "{ z : [ 1, 2, 3 ] }",
- 45456L, 3, new BigDecimal("30.04"), true, 3.0, 3.1, 333.00,
DateUtility.parseLocalTime("11:34:21.0"),
+ 45456L, 3, 30.04, true, 3.0, 3.1f, 333.00,
DateUtility.parseLocalTime("11:34:21.0"),
DateUtility.parseLocalDateTime("2011-10-30 11:34:21.0"),
DateUtility.parseLocalDate("2011-10-30"), "clobber")
.baselineValues(4, null, null, "2 15th St NW", "Washington", "DC",
20007, "{ z : { a : 1, b : 2, c : 3 } }",
- -67L, 4, new BigDecimal("40.04"), false, 4.0, 4.1, 444.00,
DateUtility.parseLocalTime("16:00:01.0"),
+ -67L, 4, 40.04, false, 4.0, 4.1f, 444.00,
DateUtility.parseLocalTime("16:00:01.0"),
DateUtility.parseLocalDateTime("2015-06-01 16:00:01.0"),
DateUtility.parseLocalDate("2015-06-01"), "xxx")
.baselineValues(5, null, null, null, null, null, null, null, null,
null, null, null, null, null, null,
null, null, null, null)
@@ -275,7 +273,7 @@ public class TestJdbcPluginWithH2IT extends ClusterTest {
testBuilder()
.sqlQuery(sql)
.unOrdered()
- .expectsNumRecords(33)
+ .expectsNumRecords(35)
.csvBaselineFile("h2_information_schema_tables.csv")
.baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
.go();
@@ -288,7 +286,7 @@ public class TestJdbcPluginWithH2IT extends ClusterTest {
.sqlQuery(query)
.unOrdered()
.baselineColumns("table_type")
- .baselineValuesForSingleColumn("SYSTEM TABLE", "TABLE")
+ .baselineValuesForSingleColumn("SYSTEM TABLE", "TABLE", "VIEW",
"OTHER")
.go();
}
diff --git
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithH2.java
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithH2.java
index 1013b8e..afde051 100644
---
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithH2.java
+++
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithH2.java
@@ -57,6 +57,10 @@ import static org.junit.Assert.fail;
public class TestJdbcWriterWithH2 extends ClusterTest {
+ public static final String TEST_TABLE = "h2.tmp.drill_h2_test.test_table";
+
+ public static final String DROP_TEST_TABLE = String.format("DROP TABLE %s",
TEST_TABLE);
+
@BeforeClass
public static void init() throws Exception {
startCluster(ClusterFixture.builder(dirTestWatcher));
@@ -84,8 +88,7 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
jdbcStorageConfigNoWrite.setEnabled(true);
cluster.defineStoragePlugin("h2", jdbcStorageConfig);
- cluster.defineStoragePlugin("h2o", jdbcStorageConfig);
- cluster.defineStoragePlugin("h2o_unwritable", jdbcStorageConfigNoWrite);
+ cluster.defineStoragePlugin("h2_unwritable", jdbcStorageConfigNoWrite);
EnumMockPlugin.EnumMockStoragePluginConfig config = new
EnumMockPlugin.EnumMockStoragePluginConfig();
config.setEnabled(true);
@@ -94,36 +97,37 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
@Test
public void testBasicCTAS() throws Exception {
- String query = "CREATE TABLE h2.tmp.`drill_h2_test`.`test_table` (ID,
NAME) AS SELECT * FROM (VALUES(1,2), (3,4))";
+ String query = String.format("CREATE TABLE %s (ID, NAME) AS SELECT * FROM
(VALUES(1,2), (3,4))", TEST_TABLE);
// Create the table and insert the values
QuerySummary insertResults = queryBuilder().sql(query).run();
- assertTrue(insertResults.succeeded());
-
- // Query the table to see if the insertion was successful
- String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`test_table`";
- DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
- .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1L, 2L)
- .addRow(3L, 4L)
- .build();
-
- RowSetUtilities.verify(expected, results);
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`test_table`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ assertTrue(insertResults.succeeded());
+
+ // Query the table to see if the insertion was successful
+ String testQuery = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
+ .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1L, 2L)
+ .addRow(3L, 4L)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testBasicCTASWithDataTypes() throws Exception {
- String query = "CREATE TABLE h2.tmp.`drill_h2_test`.`data_types` AS " +
+ String query = String.format("CREATE TABLE %s AS ", TEST_TABLE) +
"SELECT CAST(1 AS INTEGER) AS int_field," +
"CAST(2 AS BIGINT) AS bigint_field," +
"CAST(3.0 AS FLOAT) AS float4_field," +
@@ -137,90 +141,91 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- // Query the table to see if the insertion was successful
- String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`data_types`";
- DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .addNullable("int_field", MinorType.INT, 10)
- .addNullable("bigint_field", MinorType.BIGINT, 19)
- .addNullable("float4_field", MinorType.VARDECIMAL, 38, 37)
- .addNullable("float8_field", MinorType.VARDECIMAL, 38, 37)
- .addNullable("varchar_field", MinorType.VARCHAR, 38)
- .addNullable("date_field", MinorType.DATE, 10)
- .addNullable("time_field", MinorType.TIME, 8)
- .addNullable("timestamp_field", MinorType.TIMESTAMP, 26, 8)
- .addNullable("boolean_field", MinorType.BIT, 1)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1, 2L, 3.0, 4.0, "5.0", LocalDate.parse("2021-01-01"),
LocalTime.parse("12:00"), 1451516155000L, true)
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`data_types`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ // Query the table to see if the insertion was successful
+ String testQuery = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .addNullable("int_field", MinorType.INT, 32)
+ .addNullable("bigint_field", MinorType.BIGINT, 38)
+ .addNullable("float4_field", MinorType.FLOAT4, 38)
+ .addNullable("float8_field", MinorType.FLOAT8, 38)
+ .addNullable("varchar_field", MinorType.VARCHAR, 38)
+ .addNullable("date_field", MinorType.DATE, 10)
+ .addNullable("time_field", MinorType.TIME, 8)
+ .addNullable("timestamp_field", MinorType.TIMESTAMP, 26, 6)
+ .addNullable("boolean_field", MinorType.BIT, 1)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1, 2L, 3.0, 4.0, "5.0", LocalDate.parse("2021-01-01"),
LocalTime.parse("12:00"), 1451516155000L, true)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testBasicCTASWithSpacesInFieldNames() throws Exception {
- String query = "CREATE TABLE h2.tmp.`drill_h2_test`.`test table` (`My id`,
`My name`) AS SELECT * FROM (VALUES(1,2), (3,4))";
+ String query = String.format("CREATE TABLE %s (`My id`, `My name`) AS
SELECT * FROM (VALUES(1,2), (3,4))", TEST_TABLE);
// Create the table and insert the values
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- // Query the table to see if the insertion was successful
- String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`test table`";
- DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .add("My id", MinorType.BIGINT, DataMode.OPTIONAL)
- .add("My name", MinorType.BIGINT, DataMode.OPTIONAL)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1L, 2L)
- .addRow(3L, 4L)
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`test table`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ // Query the table to see if the insertion was successful
+ String testQuery = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .add("My id", MinorType.BIGINT, DataMode.OPTIONAL)
+ .add("My name", MinorType.BIGINT, DataMode.OPTIONAL)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1L, 2L)
+ .addRow(3L, 4L)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testCTASFromFileWithNulls() throws Exception {
- String sql = "CREATE TABLE h2.tmp.`drill_h2_test`.`t1` AS SELECT
int_field, float_field, varchar_field, boolean_field FROM
cp.`json/dataTypes.json`";
+ String sql = String.format("CREATE TABLE %s AS SELECT int_field,
float_field, varchar_field, boolean_field FROM cp.`json/dataTypes.json`",
TEST_TABLE);
QuerySummary insertResults = queryBuilder().sql(sql).run();
assertTrue(insertResults.succeeded());
- sql = "SELECT * FROM h2.tmp.`drill_h2_test`.`t1`";
- DirectRowSet results = queryBuilder().sql(sql).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .addNullable("int_field", MinorType.BIGINT, 19)
- .addNullable("float_field", MinorType.VARDECIMAL, 38, 37)
- .addNullable("varchar_field", MinorType.VARCHAR, 38)
- .addNullable("boolean_field", MinorType.BIT, 1)
- .build();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1L, 1.0, "foo1", true)
- .addRow(null, null, null, null)
- .addRow(2L, 2.0, "foo2", false)
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`t1`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ sql = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(sql).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .addNullable("int_field", MinorType.BIGINT, 38)
+ .addNullable("float_field", MinorType.FLOAT8, 38)
+ .addNullable("varchar_field", MinorType.VARCHAR, 38)
+ .addNullable("boolean_field", MinorType.BIT, 1)
+ .build();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1L, 1.0, "foo1", true)
+ .addRow(null, null, null, null)
+ .addRow(2L, 2.0, "foo2", false)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
@@ -241,93 +246,99 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- // Query the table to see if the insertion was successful
- String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`test table`";
- DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
- .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1L, 2L)
- .addRow(3L, 4L)
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`test table`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ // Query the table to see if the insertion was successful
+ String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`test table`";
+ DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
+ .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1L, 2L)
+ .addRow(3L, 4L)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`test table`";
+ QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testBasicCTASIfNotExists() throws Exception {
- String query = "CREATE TABLE IF NOT EXISTS
h2.tmp.`drill_h2_test`.`test_table` (ID, NAME) AS SELECT * FROM (VALUES(1,2),
(3,4))";
+ String query = String.format("CREATE TABLE IF NOT EXISTS %s (ID, NAME) AS
SELECT * FROM (VALUES(1,2), (3,4))", TEST_TABLE);
// Create the table and insert the values
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- // Query the table to see if the insertion was successful
- String testQuery = "SELECT * FROM h2.tmp.`drill_h2_test`.`test_table`";
- DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
- .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(1L, 2L)
- .addRow(3L, 4L)
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`test_table`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ // Query the table to see if the insertion was successful
+ String testQuery = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(testQuery).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .add("ID", MinorType.BIGINT, DataMode.OPTIONAL)
+ .add("NAME", MinorType.BIGINT, DataMode.OPTIONAL)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(1L, 2L)
+ .addRow(3L, 4L)
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testCTASWithDuplicateTable() throws Exception {
- String query = "CREATE TABLE h2.tmp.`drill_h2_test`.`test_table` (ID,
NAME) AS SELECT * FROM (VALUES(1,2), (3,4))";
+ String query = String.format("CREATE TABLE %s (ID, NAME) AS SELECT * FROM
(VALUES(1,2), (3,4))", TEST_TABLE);
// Create the table and insert the values
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- // Run the query again, should fail.
try {
- queryBuilder().sql(query).run();
- fail();
- } catch (UserRemoteException e) {
- assertTrue(e.getMessage().contains("VALIDATION ERROR"));
+ // Run the query again, should fail.
+ try {
+ queryBuilder().sql(query).run();
+ fail();
+ } catch (UserRemoteException e) {
+ assertTrue(e.getMessage().contains("VALIDATION ERROR"));
+ }
+
+ // Try again with IF NOT EXISTS, Should not do anything, but not throw
an exception
+ query = String.format("CREATE TABLE IF NOT EXISTS %s (ID, NAME) AS
SELECT * FROM (VALUES(1,2), (3,4))", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(query).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .add("ok", MinorType.BIT)
+ .add("summary", MinorType.VARCHAR, DataMode.OPTIONAL)
+ .buildSchema();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow(false, "A table or view with given name [test_table] already
exists in schema [h2.tmp.drill_h2_test]")
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
}
-
- // Try again with IF NOT EXISTS, Should not do anything, but not throw an
exception
- query = "CREATE TABLE IF NOT EXISTS h2.tmp.`drill_h2_test`.`test_table`
(ID, NAME) AS SELECT * FROM (VALUES(1,2), (3,4))";
- DirectRowSet results = queryBuilder().sql(query).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .add("ok", MinorType.BIT)
- .add("summary", MinorType.VARCHAR, DataMode.OPTIONAL)
- .buildSchema();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow(false, "A table or view with given name [test_table] already
exists in schema [h2.tmp.drill_h2_test]")
- .build();
-
- RowSetUtilities.verify(expected, results);
}
@Test
public void testWithComplexData() throws Exception {
// JDBC Writer does not support writing complex types at this time.
try {
- String sql = "CREATE TABLE h2.tmp.`drill_h2_test`.`complex` AS SELECT *
FROM cp.`json/complexData.json`";
+ String sql = String.format("CREATE TABLE %s AS SELECT * FROM
cp.`json/complexData.json`", TEST_TABLE);
queryBuilder().sql(sql).run();
fail();
} catch (UserRemoteException e) {
@@ -337,34 +348,35 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
@Test
public void testCTASFromFileWithUglyData() throws Exception {
- String sql = "CREATE TABLE h2.tmp.`drill_h2_test`.`t2` AS SELECT ugly1,
ugly2 FROM cp.`json/uglyData.json`";
+ String sql = String.format("CREATE TABLE %s AS SELECT ugly1, ugly2 FROM
cp.`json/uglyData.json`", TEST_TABLE);
QuerySummary insertResults = queryBuilder().sql(sql).run();
assertTrue(insertResults.succeeded());
- sql = "SELECT * FROM h2.tmp.`drill_h2_test`.`t2`";
- DirectRowSet results = queryBuilder().sql(sql).rowSet();
-
- TupleMetadata expectedSchema = new SchemaBuilder()
- .addNullable("ugly1", MinorType.VARCHAR, 38)
- .addNullable("ugly2", MinorType.VARCHAR, 38)
- .build();
-
- RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
- .addRow("O'Malley", "Abraham Lincoln's best speech started with: \"Four
score and seven years ago...")
- .build();
-
- RowSetUtilities.verify(expected, results);
-
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`t2`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ sql = String.format("SELECT * FROM %s", TEST_TABLE);
+ DirectRowSet results = queryBuilder().sql(sql).rowSet();
+
+ TupleMetadata expectedSchema = new SchemaBuilder()
+ .addNullable("ugly1", MinorType.VARCHAR, 38)
+ .addNullable("ugly2", MinorType.VARCHAR, 38)
+ .build();
+
+ RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+ .addRow("O'Malley", "Abraham Lincoln's best speech started with:
\"Four score and seven years ago...")
+ .build();
+
+ RowSetUtilities.verify(expected, results);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
public void testWithArrayField() throws Exception {
// JDBC Writer does not support writing arrays at this time.
try {
- String sql = "CREATE TABLE h2.tmp.`drill_h2_test`.`complex` AS SELECT *
FROM cp.`json/repeatedData.json`";
+ String sql = String.format("CREATE TABLE %s AS SELECT * FROM
cp.`json/repeatedData.json`", TEST_TABLE);
queryBuilder().sql(sql).run();
fail();
} catch (UserRemoteException e) {
@@ -374,19 +386,19 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
@Test
public void testWithLargeFile() throws Exception {
- String query = "CREATE TABLE h2.tmp.`drill_h2_test`.`t2`
(id,first_name,last_name,email,gender,ip_address) AS " +
- "SELECT id,first_name,last_name,email,gender,ip_address FROM
cp.`csv/large_csv.csvh`";
+ String query = String.format("CREATE TABLE %s
(id,first_name,last_name,email,gender,ip_address) AS " +
+ "SELECT id,first_name,last_name,email,gender,ip_address FROM
cp.`csv/large_csv.csvh`", TEST_TABLE);
QuerySummary insertResults = queryBuilder().sql(query).run();
assertTrue(insertResults.succeeded());
- query = "SELECT COUNT(*) FROM h2.tmp.`drill_h2_test`.`t2`";
- long rowCount = queryBuilder().sql(query).singletonLong();
- assertEquals(6000, rowCount);
-
- // Now drop the table
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`t2`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
+ try {
+ query = String.format("SELECT COUNT(*) FROM %s", TEST_TABLE);
+ long rowCount = queryBuilder().sql(query).singletonLong();
+ assertEquals(6000, rowCount);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+ }
}
@Test
@@ -403,42 +415,43 @@ public class TestJdbcWriterWithH2 extends ClusterTest {
long resultsCount = queryBuilder().sql(testQuery).singletonLong();
assertEquals(100000, resultsCount);
- String ctasQuery = "CREATE TABLE h2.tmp.`drill_h2_test`.`t2` AS " +
+ String ctasQuery = String.format("CREATE TABLE %s AS ", TEST_TABLE) +
"SELECT * FROM dfs.`csv/very_large_file.csvh`";
QuerySummary insertResults = queryBuilder().sql(ctasQuery).run();
assertTrue(insertResults.succeeded());
- // Query the table to see if the insertion was successful
- testQuery = "SELECT COUNT(*) FROM h2.tmp.`drill_h2_test`.`t2`";
- resultsCount = queryBuilder().sql(testQuery).singletonLong();
- assertEquals(100000, resultsCount);
-
- String dropQuery = "DROP TABLE h2.tmp.`drill_h2_test`.`t2`";
- QuerySummary dropResults = queryBuilder().sql(dropQuery).run();
- assertTrue(dropResults.succeeded());
-
- boolean deletedFile =
JdbcTestUtils.deleteCsvFile(String.valueOf(generatedFile));
- if (!deletedFile) {
- fail();
+ try {
+ // Query the table to see if the insertion was successful
+ testQuery = String.format("SELECT COUNT(*) FROM %s", TEST_TABLE);
+ resultsCount = queryBuilder().sql(testQuery).singletonLong();
+ assertEquals(100000, resultsCount);
+ } finally {
+ QuerySummary dropResults = queryBuilder().sql(DROP_TEST_TABLE).run();
+ assertTrue(dropResults.succeeded());
+
+ boolean deletedFile =
JdbcTestUtils.deleteCsvFile(String.valueOf(generatedFile));
+ if (!deletedFile) {
+ fail();
+ }
}
}
@Test
public void testUnwritableConnection() throws Exception {
try {
- String query = "CREATE TABLE IF NOT EXISTS
h2o_unwritable.tmp.`test_table` (ID, NAME) AS SELECT * FROM (VALUES(1,2),
(3,4))";
+ String query = "CREATE TABLE IF NOT EXISTS
h2_unwritable.tmp.`test_table` (ID, NAME) AS SELECT * FROM (VALUES(1,2),
(3,4))";
queryBuilder().sql(query).run();
fail();
} catch (UserRemoteException e) {
- assertTrue(e.getMessage().contains("VALIDATION ERROR: Unable to create
or drop objects. Schema [h2o_unwritable.tmp] is immutable."));
+ assertTrue(e.getMessage().contains("VALIDATION ERROR: Unable to create
or drop objects. Schema [h2_unwritable.tmp] is immutable."));
}
try {
- String query = "CREATE TABLE h2o_unwritable.tmp.`test_table` (ID, NAME)
AS SELECT * FROM (VALUES(1,2), (3,4))";
+ String query = "CREATE TABLE h2_unwritable.tmp.`test_table` (ID, NAME)
AS SELECT * FROM (VALUES(1,2), (3,4))";
queryBuilder().sql(query).run();
fail();
} catch (UserRemoteException e) {
- assertTrue(e.getMessage().contains("VALIDATION ERROR: Unable to create
or drop objects. Schema [h2o_unwritable.tmp] is immutable."));
+ assertTrue(e.getMessage().contains("VALIDATION ERROR: Unable to create
or drop objects. Schema [h2_unwritable.tmp] is immutable."));
}
}
}
diff --git
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithMySQL.java
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithMySQL.java
index 0ce5314..c5d5a99 100644
---
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithMySQL.java
+++
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithMySQL.java
@@ -269,8 +269,8 @@ public class TestJdbcWriterWithMySQL extends ClusterTest {
TupleMetadata expectedSchema = new SchemaBuilder()
.addNullable("int_field", MinorType.INT, 10)
.addNullable("bigint_field", MinorType.BIGINT, 19)
- .addNullable("float4_field", MinorType.VARDECIMAL, 10)
- .addNullable("float8_field", MinorType.VARDECIMAL, 10)
+ .addNullable("float4_field", MinorType.FLOAT8, 12)
+ .addNullable("float8_field", MinorType.FLOAT8, 22)
.addNullable("varchar_field", MinorType.VARCHAR, 38)
.addNullable("date_field", MinorType.DATE, 10)
.addNullable("time_field", MinorType.TIME, 10)
@@ -301,7 +301,7 @@ public class TestJdbcWriterWithMySQL extends ClusterTest {
TupleMetadata expectedSchema = new SchemaBuilder()
.addNullable("int_field", MinorType.BIGINT, 19)
- .addNullable("float_field", MinorType.VARDECIMAL, 10)
+ .addNullable("float_field", MinorType.FLOAT8, 22)
.addNullable("varchar_field", MinorType.VARCHAR, 38,0)
.addNullable("boolean_field", MinorType.BIT, 1)
.build();
diff --git
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithPostgres.java
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithPostgres.java
index b517353..7815de1 100644
---
a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithPostgres.java
+++
b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcWriterWithPostgres.java
@@ -148,8 +148,8 @@ public class TestJdbcWriterWithPostgres extends ClusterTest
{
TupleMetadata expectedSchema = new SchemaBuilder()
.addNullable("int_field", MinorType.INT, 10)
.addNullable("bigint_field", MinorType.BIGINT, 19)
- .addNullable("float4_field", MinorType.FLOAT8, 12)
- .addNullable("float8_field", MinorType.FLOAT8, 22)
+ .addNullable("float4_field", MinorType.FLOAT8, 17, 17)
+ .addNullable("float8_field", MinorType.FLOAT8, 17, 17)
.addNullable("varchar_field", MinorType.VARCHAR, 38)
.addNullable("date_field", MinorType.DATE, 10)
.addNullable("time_field", MinorType.TIME, 10)
diff --git a/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
b/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
index 6f3ecca..8686d82 100644
--- a/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
+++ b/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
@@ -16,7 +16,7 @@ create table person (
bigint_field BIGINT,
smallint_field SMALLINT,
- numeric_field NUMERIC(10, 2),
+ numeric_field NUMERIC(20, 2),
boolean_field BOOLEAN,
double_field DOUBLE,
float_field FLOAT,
@@ -59,4 +59,4 @@ insert into person (person_id) values (5);
create SCHEMA drill_h2_test_1;
set schema drill_h2_test_1;
create table person(person_id INT NOT NULL PRIMARY KEY);
-set schema drill_h2_test;
\ No newline at end of file
+set schema drill_h2_test;
diff --git
a/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
b/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
index 971ee0f..44d3e34 100644
--- a/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
+++ b/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
@@ -1,33 +1,35 @@
-h2.tmp.information_schema,CATALOGS
-h2.tmp.information_schema,COLLATIONS
-h2.tmp.information_schema,COLUMNS
-h2.tmp.information_schema,COLUMN_PRIVILEGES
+h2.tmp.information_schema,ENUM_VALUES
h2.tmp.information_schema,CONSTANTS
-h2.tmp.information_schema,CONSTRAINTS
-h2.tmp.information_schema,CROSS_REFERENCES
-h2.tmp.information_schema,DOMAINS
-h2.tmp.information_schema,FUNCTION_ALIASES
-h2.tmp.information_schema,FUNCTION_COLUMNS
-h2.tmp.information_schema,HELP
-h2.tmp.information_schema,INDEXES
-h2.tmp.information_schema,IN_DOUBT
+h2.tmp.information_schema,SEQUENCES
+h2.tmp.information_schema,RIGHTS
+h2.tmp.information_schema,TRIGGERS
+h2.tmp.information_schema,SETTINGS
+h2.tmp.information_schema,VIEWS
+h2.tmp.information_schema,INFORMATION_SCHEMA_CATALOG_NAME
+h2.tmp.information_schema,ROUTINES
+h2.tmp.information_schema,COLUMNS
h2.tmp.information_schema,KEY_COLUMN_USAGE
h2.tmp.information_schema,LOCKS
-h2.tmp.information_schema,QUERY_STATISTICS
+h2.tmp.information_schema,DOMAINS
+h2.tmp.information_schema,COLUMN_PRIVILEGES
+h2.tmp.information_schema,SCHEMATA
+h2.tmp.information_schema,FIELDS
+h2.tmp.information_schema,CONSTRAINT_COLUMN_USAGE
+h2.tmp.information_schema,SESSION_STATE
h2.tmp.information_schema,REFERENTIAL_CONSTRAINTS
-h2.tmp.information_schema,RIGHTS
+h2.tmp.information_schema,TABLE_PRIVILEGES
+h2.tmp.information_schema,PARAMETERS
+h2.tmp.information_schema,TABLES
+h2.tmp.information_schema,ELEMENT_TYPES
+h2.tmp.information_schema,QUERY_STATISTICS
+h2.tmp.information_schema,INDEX_COLUMNS
h2.tmp.information_schema,ROLES
-h2.tmp.information_schema,SCHEMATA
-h2.tmp.information_schema,SEQUENCES
+h2.tmp.information_schema,CHECK_CONSTRAINTS
h2.tmp.information_schema,SESSIONS
-h2.tmp.information_schema,SESSION_STATE
-h2.tmp.information_schema,SETTINGS
+h2.tmp.information_schema,IN_DOUBT
+h2.tmp.information_schema,COLLATIONS
+h2.tmp.information_schema,DOMAIN_CONSTRAINTS
h2.tmp.information_schema,SYNONYMS
-h2.tmp.information_schema,TABLES
-h2.tmp.information_schema,TABLE_CONSTRAINTS
-h2.tmp.information_schema,TABLE_PRIVILEGES
-h2.tmp.information_schema,TABLE_TYPES
-h2.tmp.information_schema,TRIGGERS
-h2.tmp.information_schema,TYPE_INFO
h2.tmp.information_schema,USERS
-h2.tmp.information_schema,VIEWS
\ No newline at end of file
+h2.tmp.information_schema,TABLE_CONSTRAINTS
+h2.tmp.information_schema,INDEXES
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 8c9b6b8..fcaeed3 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -199,28 +199,30 @@
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-tcnative</artifactId>
- <version>2.0.1.Final</version>
+ <version>2.0.48.Final</version>
<classifier>linux-x86_64</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-tcnative</artifactId>
- <version>2.0.1.Final</version>
+ <version>2.0.48.Final</version>
<classifier>linux-x86_64-fedora</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-tcnative</artifactId>
- <version>2.0.1.Final</version>
+ <version>2.0.48.Final</version>
<classifier>osx-x86_64</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-tcnative</artifactId>
- <version>2.0.1.Final</version>
+ <!-- bump warning: windows-x86_64 jars apparently stopped being published
+ after 2.0.36, see
https://repo1.maven.org/maven2/io/netty/netty-tcnative/ -->
+ <version>2.0.36.Final</version>
<classifier>windows-x86_64</classifier>
<scope>test</scope>
</dependency>
diff --git a/distribution/src/assemble/component.xml
b/distribution/src/assemble/component.xml
index 4031c50..853793d 100644
--- a/distribution/src/assemble/component.xml
+++ b/distribution/src/assemble/component.xml
@@ -116,7 +116,6 @@
<include>org.glassfish.jersey.core</include>
<include>org.glassfish.jersey.ext</include>
<include>org.glassfish.jersey.media</include>
- <include>org.glassfish.jersey.bundles.repackaged</include>
<include>org.glassfish.hk2</include>
<include>javax.servlet</include>
<include>javax.ws.rs</include>
@@ -158,7 +157,6 @@
<exclude>org.glassfish.jersey.core</exclude>
<exclude>org.glassfish.jersey.ext</exclude>
<exclude>org.glassfish.jersey.media</exclude>
- <exclude>org.glassfish.jersey.bundles.repackaged</exclude>
<exclude>org.glassfish.hk2</exclude>
<exclude>javax.servlet</exclude>
<exclude>javax.ws.rs </exclude>
@@ -186,7 +184,7 @@
<unpack>false</unpack>
<useProjectArtifact>false</useProjectArtifact>
<includes>
- <include>io.netty:netty-tcnative:jar:linux-x86_64:2.0.1.Final</include>
+
<include>io.netty:netty-tcnative:jar:linux-x86_64:2.0.48.Final</include>
</includes>
<scope>test</scope>
</dependencySet>
@@ -195,7 +193,7 @@
<unpack>false</unpack>
<useProjectArtifact>false</useProjectArtifact>
<includes>
-
<include>io.netty:netty-tcnative:jar:linux-x86_64-fedora:2.0.1.Final</include>
+
<include>io.netty:netty-tcnative:jar:linux-x86_64-fedora:2.0.48.Final</include>
</includes>
<scope>test</scope>
</dependencySet>
@@ -204,7 +202,7 @@
<unpack>false</unpack>
<useProjectArtifact>false</useProjectArtifact>
<includes>
-
<include>io.netty:netty-tcnative:jar:windows-x86_64:2.0.1.Final</include>
+
<include>io.netty:netty-tcnative:jar:windows-x86_64:2.0.36.Final</include>
</includes>
<scope>test</scope>
</dependencySet>
@@ -213,7 +211,7 @@
<unpack>false</unpack>
<useProjectArtifact>false</useProjectArtifact>
<includes>
- <include>io.netty:netty-tcnative:jar:osx-x86_64:2.0.1.Final</include>
+ <include>io.netty:netty-tcnative:jar:osx-x86_64:2.0.48.Final</include>
</includes>
<scope>test</scope>
</dependencySet>
diff --git a/distribution/src/main/resources/LICENSE
b/distribution/src/main/resources/LICENSE
index 650d21b..6c5fa19 100644
--- a/distribution/src/main/resources/LICENSE
+++ b/distribution/src/main/resources/LICENSE
@@ -635,7 +635,6 @@ This product bundles
jersey-ext-mvc (org.glassfish.jersey.ext:jersey-mvc:2.8 -
https://jersey.java.net/project/jersey-mvc/)
jersey-ext-mvc-freemarker
(org.glassfish.jersey.ext:jersey-mvc-freemarker:2.8 -
https://jersey.java.net/project/jersey-mvc-freemarker/)
jersey-media-multipart
(org.glassfish.jersey.media:jersey-media-multipart:2.8 -
https://jersey.java.net/project/jersey-media-multipart/)
- jersey-repackaged-guava
(org.glassfish.jersey.bundles.repackaged:jersey-guava:2.8 -
https://jersey.java.net/project/project/jersey-guava/)
OSGi resource locator bundle - used by various API providers that rely on
META-INF/services mechanism to locate providers.
(org.glassfish.hk2:osgi-resource-locator:1.0.1 -
http://glassfish.org/osgi-resource-locator/)
ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.2.0 -
https://hk2.java.net/hk2-locator)
servlet-api (javax.servlet:servlet-api:2.4 - no url defined)
@@ -944,4 +943,4 @@ Copyright (C) 2004 Sam Hocevar <[email protected]>
The following libraries are from the public domain.
XZ for Java (org.tukaani:xz:1.0 - http://tukaani.org/xz/java.html)
-================================================================================
\ No newline at end of file
+================================================================================
diff --git a/exec/java-exec/pom.xml b/exec/java-exec/pom.xml
index bea536c..be68121 100644
--- a/exec/java-exec/pom.xml
+++ b/exec/java-exec/pom.xml
@@ -380,7 +380,7 @@
<dependency>
<groupId>com.tdunning</groupId>
<artifactId>t-digest</artifactId>
- <version>3.2</version>
+ <version>3.3</version>
</dependency>
<dependency>
<groupId>com.squareup.okhttp3</groupId>
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
index ab0427a..11980dd 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
@@ -68,7 +68,9 @@ public class DropTableHandler extends DefaultSqlHandler {
} else {
AbstractSchema drillSchema =
SchemaUtilites.resolveToMutableDrillSchema(defaultSchema, tableSchema);
Table tableToDrop = SqlHandlerUtil.getTableFromSchema(drillSchema,
originalTableName);
- if (tableToDrop == null || tableToDrop.getJdbcTableType() !=
Schema.TableType.TABLE) {
+ // TableType.OTHER started getting reported for H2 DB when it was
upgraded to v2.
+ if (tableToDrop == null || (tableToDrop.getJdbcTableType() !=
Schema.TableType.TABLE &&
+ tableToDrop.getJdbcTableType() != Schema.TableType.OTHER)) {
if (dropTableNode.checkTableExistence()) {
return DirectPlan.createDirectPlan(context, false,
String.format("Table [%s] not found", originalTableName));
} else {
diff --git
a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
index 773a861..3ad0a7a 100644
---
a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
+++
b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
@@ -18,6 +18,7 @@
package org.apache.parquet.hadoop;
import java.io.IOException;
+import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
@@ -215,20 +216,20 @@ public class ColumnChunkIncReadStore implements
PageReadStore {
// Note that the repetition and definition levels are stored
uncompressed in
// the v2 page format.
int pageBufOffset = 0;
- ByteBuffer bb = (ByteBuffer) pageBuf.position(pageBufOffset);
+ ByteBuffer bb = (ByteBuffer)
((Buffer)pageBuf).position(pageBufOffset);
BytesInput repLevelBytes = BytesInput.from(
(ByteBuffer) bb.slice().limit(pageBufOffset + repLevelSize)
);
pageBufOffset += repLevelSize;
- bb = (ByteBuffer) pageBuf.position(pageBufOffset);
+ bb = (ByteBuffer) ((Buffer)pageBuf).position(pageBufOffset);
final BytesInput defLevelBytes = BytesInput.from(
(ByteBuffer) bb.slice().limit(pageBufOffset + defLevelSize)
);
pageBufOffset += defLevelSize;
// we've now reached the beginning of compressed column data
- bb = (ByteBuffer) pageBuf.position(pageBufOffset);
+ bb = (ByteBuffer) ((Buffer)pageBuf).position(pageBufOffset);
final BytesInput colDataBytes = decompressor.decompress(
BytesInput.from((ByteBuffer) bb.slice()),
pageSize - repLevelSize - defLevelSize
diff --git
a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestAnalyze.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestAnalyze.java
index 5e866f0..a0292d6 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestAnalyze.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestAnalyze.java
@@ -492,7 +492,7 @@ public class TestAnalyze extends ClusterTest {
.match();
query = "select 1 from dfs.tmp.employee1 where store_id < 15";
- String[] expectedPlan2 = {"Filter\\(condition.*\\).*rowcount = 676.*,.*",
+ String[] expectedPlan2 = {"Filter\\(condition.*\\).*rowcount = 699.*,.*",
"Scan.*columns=\\[`store_id`\\].*rowcount = 1128.0.*"};
queryBuilder()
.sql(query)
diff --git
a/exec/java-exec/src/test/java/org/apache/drill/storage/CredentialsProviderSerDeTest.java
b/exec/java-exec/src/test/java/org/apache/drill/storage/CredentialsProviderSerDeTest.java
index fc2e3d8..75adfbb 100644
---
a/exec/java-exec/src/test/java/org/apache/drill/storage/CredentialsProviderSerDeTest.java
+++
b/exec/java-exec/src/test/java/org/apache/drill/storage/CredentialsProviderSerDeTest.java
@@ -48,7 +48,6 @@ public class CredentialsProviderSerDeTest extends ClusterTest
{
public static final VaultContainer<?> vaultContainer =
new VaultContainer<>(DockerImageName.parse("vault").withTag("1.1.3"))
.withVaultToken(VAULT_TOKEN_VALUE)
- .withVaultPort(8200)
.withSecretInVault(SECRET_PATH,
"top_secret=password1",
"db_password=dbpassword1");
@@ -56,7 +55,7 @@ public class CredentialsProviderSerDeTest extends ClusterTest
{
@BeforeClass
public static void init() throws Exception {
startCluster(ClusterFixture.builder(dirTestWatcher)
- .configProperty(VaultCredentialsProvider.VAULT_ADDRESS, "http://" +
vaultContainer.getHost() + ":" + vaultContainer.getMappedPort(8200))
+ .configProperty(VaultCredentialsProvider.VAULT_ADDRESS, "http://" +
vaultContainer.getHost() + ":" + vaultContainer.getFirstMappedPort())
.configProperty(VaultCredentialsProvider.VAULT_TOKEN,
VAULT_TOKEN_VALUE));
}
diff --git a/exec/jdbc-all/pom.xml b/exec/jdbc-all/pom.xml
index 984aa1e..3cc84a6 100644
--- a/exec/jdbc-all/pom.xml
+++ b/exec/jdbc-all/pom.xml
@@ -33,7 +33,7 @@
"package.namespace.prefix" equals to "oadd.". It can be overridden if
necessary within any profile -->
<properties>
<package.namespace.prefix>oadd.</package.namespace.prefix>
- <jdbc-all-jar.maxsize>50000000</jdbc-all-jar.maxsize>
+ <jdbc-all-jar.maxsize>49300000</jdbc-all-jar.maxsize>
</properties>
<dependencies>
@@ -315,7 +315,7 @@
<!--dependencyReducedPomLocation>${project.build.directory}/generated/shade/dependency-reduced-pom.xml</dependencyReducedPomLocation-->
<minimizeJar>false</minimizeJar>
- <!-- Exclude dependencies at artifact level. Format is
"groupId:artifactId[[:type]:classifier]" -->
+ <!-- Exclude dependencies at artifact level. Format is
"groupId:artifactId[[:type]:classifier]" -->
<artifactSet>
<includes>
<include>*:*</include>
@@ -344,9 +344,6 @@
<exclude>javax.inject:*</exclude>
<exclude>com.beust:*</exclude>
<exclude>jline:*</exclude>
- <exclude>io.netty:netty:jar:3.7.0.Final</exclude>
- <exclude>io.netty:netty-transport-native-epoll:*</exclude>
- <exclude>io.netty:netty-transport-native-unix-common:*</exclude>
<exclude>org.xerial.snappy:*</exclude>
<exclude>org.apache.avro:*</exclude>
<exclude>org.tukaani:*</exclude>
@@ -367,10 +364,15 @@
<exclude>commons-io:commons-io</exclude>
<exclude>commons-beanutils:commons-beanutils-core:jar:*</exclude>
<exclude>commons-beanutils:commons-beanutils:jar:*</exclude>
- <exclude>io.netty:netty-tcnative:jar:*</exclude>
<exclude>com.google.code.findbugs:jsr305:*</exclude>
<exclude>com.esri.geometry:esri-geometry-api:*</exclude>
<exclude>dnsjava:dnsjava:jar:*</exclude>
+ <exclude>io.netty:netty-tcnative:jar:*</exclude>
+ <exclude>io.netty:netty-tcnative-classes:jar:*</exclude>
+ <exclude>com.bettercloud:vault-java-driver:jar:*</exclude>
+ <exclude>com.tdunning:t-digest:jar:*</exclude>
+ <exclude>io.airlift:aircompressor:jar:*</exclude>
+ <exclude>com.rdblue:brotli-codec:jar:*</exclude>
</excludes>
</artifactSet>
<relocations>
@@ -630,7 +632,6 @@
<exclude>javax.inject:*</exclude>
<exclude>com.beust:*</exclude>
<exclude>jline:*</exclude>
- <exclude>io.netty:netty:jar:3.7.0.Final</exclude>
<exclude>org.xerial.snappy:*</exclude>
<exclude>org.apache.avro:*</exclude>
<exclude>org.tukaani:*</exclude>
@@ -656,6 +657,13 @@
<exclude>commons-beanutils:commons-beanutils:jar:*</exclude>
<exclude>com.google.code.findbugs:jsr305:*</exclude>
<exclude>com.esri.geometry:esri-geometry-api:*</exclude>
+ <exclude>dnsjava:dnsjava:jar:*</exclude>
+ <exclude>io.netty:netty-tcnative:jar:*</exclude>
+ <exclude>io.netty:netty-tcnative-classes:jar:*</exclude>
+ <exclude>com.bettercloud:vault-java-driver:jar:*</exclude>
+ <exclude>com.tdunning:t-digest:jar:*</exclude>
+ <exclude>io.airlift:aircompressor:jar:*</exclude>
+ <exclude>com.rdblue:brotli-codec:jar:*</exclude>
</excludes>
</artifactSet>
<relocations>
diff --git a/exec/memory/base/src/main/java/io/netty/buffer/DrillBuf.java
b/exec/memory/base/src/main/java/io/netty/buffer/DrillBuf.java
index c055344..1f5fc7b 100644
--- a/exec/memory/base/src/main/java/io/netty/buffer/DrillBuf.java
+++ b/exec/memory/base/src/main/java/io/netty/buffer/DrillBuf.java
@@ -20,6 +20,7 @@ package io.netty.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
@@ -858,7 +859,7 @@ public final class DrillBuf extends AbstractByteBuf
implements AutoCloseable {
if (srcIndex == 0 && src.capacity() == length) {
udle.setBytes(index + offset, src);
} else {
- ByteBuffer newBuf = src.duplicate();
+ Buffer newBuf = src.duplicate();
newBuf.position(srcIndex);
newBuf.limit(srcIndex + length);
udle.setBytes(index + offset, src);
diff --git a/pom.xml b/pom.xml
index 71f0510..272c21b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -64,7 +64,7 @@
<avatica.version>1.17.0</avatica.version>
<janino.version>3.0.11</janino.version>
<sqlline.version>1.12.0</sqlline.version>
- <jackson.version>2.12.1</jackson.version>
+ <jackson.version>2.12.6</jackson.version>
<zookeeper.version>3.5.7</zookeeper.version>
<mapr.release.version>6.1.0-mapr</mapr.release.version>
<ojai.version>3.0-mapr-1808</ojai.version>
@@ -92,20 +92,21 @@
<javassist.version>3.28.0-GA</javassist.version>
<msgpack.version>0.6.6</msgpack.version>
<reflections.version>0.9.10</reflections.version>
- <avro.version>1.9.1</avro.version>
+ <avro.version>1.11.0</avro.version>
<metrics.version>4.0.2</metrics.version>
- <jetty.version>9.4.41.v20210516</jetty.version>
+ <jetty.version>9.4.44.v20210927</jetty.version>
<jersey.version>2.34</jersey.version>
<javax.validation.api>2.0.1.Final</javax.validation.api>
<asm.version>9.2</asm.version>
<excludedGroups />
- <memoryMb>1800</memoryMb>
- <directMemoryMb>3000</directMemoryMb>
+ <memoryMb>2500</memoryMb>
+ <directMemoryMb>2500</directMemoryMb>
<rat.skip>true</rat.skip>
<license.skip>true</license.skip>
<docker.repository>apache/drill</docker.repository>
<antlr.version>4.8-1</antlr.version>
- <maven.version>3.6.3</maven.version>
+ <maven.version>3.8.4</maven.version>
+ <maven.min.version>3.6.3</maven.min.version>
<commons.net.version>3.6</commons.net.version>
<commons.validator.version>1.6</commons.validator.version>
<commons.text.version>1.6</commons.text.version>
@@ -118,7 +119,7 @@
<jna.version>5.8.0</jna.version>
<commons.compress.version>1.21</commons.compress.version>
<hikari.version>4.0.3</hikari.version>
- <netty.version>4.1.59.Final</netty.version>
+ <netty.version>4.1.73.Final</netty.version>
<httpclient.version>4.5.13</httpclient.version>
<libthrift.version>0.14.0</libthrift.version>
<derby.version>10.14.2.0</derby.version>
@@ -631,7 +632,7 @@
<configuration>
<rules>
<requireMavenVersion>
- <version>[${maven.version},4)</version>
+ <version>[${maven.min.version},4)</version>
</requireMavenVersion>
<requireJavaVersion>
<version>[1.8,18)</version>
@@ -1819,7 +1820,7 @@
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-tcnative</artifactId>
- <version>2.0.39.Final</version>
+ <version>2.0.48.Final</version>
<classifier>${netty.tcnative.classifier}</classifier>
<scope>runtime</scope>
<optional>true</optional>