hive git commit: HIVE-20593 : Load Data for partitioned ACID tables fails with bucketId out of range: -1 (Deepak Jaiswal, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master a036e52df -> 36653c2cd HIVE-20593 : Load Data for partitioned ACID tables fails with bucketId out of range: -1 (Deepak Jaiswal, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/36653c2c Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/36653c2c Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/36653c2c Branch: refs/heads/master Commit: 36653c2cd4af9815151a2453d1b50065510a4fe9 Parents: a036e52 Author: Deepak Jaiswal Authored: Tue Sep 25 22:55:35 2018 -0700 Committer: Deepak Jaiswal Committed: Tue Sep 25 22:55:35 2018 -0700 -- .../hive/ql/parse/LoadSemanticAnalyzer.java | 6 + .../clientpositive/load_data_using_job.q| 18 ++- .../llap/load_data_using_job.q.out | 110 ++- 3 files changed, 128 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/36653c2c/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 8d33cf5..308297e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -23,11 +23,13 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; + import org.antlr.runtime.tree.Tree; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.net.URLCodec; @@ -40,6 +42,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import 
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; @@ -474,6 +477,9 @@ public class LoadSemanticAnalyzer extends SemanticAnalyzer { // wipe out partition columns tempTableObj.setPartCols(new ArrayList<>()); +// Reset table params +tempTableObj.setParameters(new HashMap<>()); + // Set data location and input format, it must be text tempTableObj.setDataLocation(new Path(fromURI)); if (inputFormatClassName != null && serDeClassName != null) { http://git-wip-us.apache.org/repos/asf/hive/blob/36653c2c/ql/src/test/queries/clientpositive/load_data_using_job.q -- diff --git a/ql/src/test/queries/clientpositive/load_data_using_job.q b/ql/src/test/queries/clientpositive/load_data_using_job.q index b760d9b..970a752 100644 --- a/ql/src/test/queries/clientpositive/load_data_using_job.q +++ b/ql/src/test/queries/clientpositive/load_data_using_job.q @@ -91,4 +91,20 @@ load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'; select * from srcbucket_mapjoin_n8; -drop table srcbucket_mapjoin_n8; \ No newline at end of file +drop table srcbucket_mapjoin_n8; + +-- Load into ACID table using ORC files +set hive.mapred.mode=nonstrict; +set hive.optimize.ppd=true; +set hive.optimize.index.filter=true; +set hive.tez.bucket.pruning=true; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +CREATE TABLE orc_test_txn (`id` integer, name string, dept string) PARTITIONED BY (year integer) STORED AS ORC TBLPROPERTIES('transactional'='true'); +explain load data local inpath '../../data/files/load_data_job_acid' into table orc_test_txn; +load data local inpath '../../data/files/load_data_job_acid' 
into table orc_test_txn; + +select * from orc_test_txn; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hive/blob/36653c2c/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out -- diff --git a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out index 765ffdf..8a82467 100644 --- a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out +++ b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out @@ -977,16
hive git commit: HIVE-17684: HoS memory issues with MapJoinMemoryExhaustionHandler (Misha Dmitriev, reviewed by Sahil Takiar)
Repository: hive Updated Branches: refs/heads/master 4137c212c -> a036e52df HIVE-17684: HoS memory issues with MapJoinMemoryExhaustionHandler (Misha Dmitriev, reviewed by Sahil Takiar) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a036e52d Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a036e52d Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a036e52d Branch: refs/heads/master Commit: a036e52dff85215850dee365e4093b436d301f42 Parents: 4137c21 Author: Misha Dmitriev Authored: Tue Sep 25 18:05:03 2018 -0700 Committer: Sahil Takiar Committed: Tue Sep 25 18:05:16 2018 -0700 -- .../hadoop/hive/common/GcTimeMonitor.java | 261 +++ .../org/apache/hadoop/hive/conf/HiveConf.java | 12 +- data/conf/hive-site.xml | 10 + data/conf/spark/standalone/hive-site.xml| 7 +- pom.xml | 1 + .../ql/exec/DefaultMemoryExhaustionChecker.java | 45 .../hive/ql/exec/HashTableSinkOperator.java | 9 +- .../hive/ql/exec/MemoryExhaustionChecker.java | 28 ++ .../ql/exec/MemoryExhaustionCheckerFactory.java | 39 +++ .../ql/exec/SparkMemoryExhaustionChecker.java | 91 +++ 10 files changed, 494 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/a036e52d/common/src/java/org/apache/hadoop/hive/common/GcTimeMonitor.java -- diff --git a/common/src/java/org/apache/hadoop/hive/common/GcTimeMonitor.java b/common/src/java/org/apache/hadoop/hive/common/GcTimeMonitor.java new file mode 100644 index 000..edba6f9 --- /dev/null +++ b/common/src/java/org/apache/hadoop/hive/common/GcTimeMonitor.java @@ -0,0 +1,261 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.common; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + +import java.util.List; + +/** + * Based on org.apache.hadoop.util.GcTimeMonitor. However, this class detects + * GC pauses using the same method as JvmPauseMonitor (by comparing the actual + * and expected thread sleep time) rather than by reading information from + * GarbageCollectionMXBean. The latter may sometimes report time spent in + * concurrent GC operations rather than GC pauses. This may result in inaccurate + * results when trying to estimate the time that the JVM is "frozen" due to GC. + * + * This class monitors the percentage of time the JVM is paused in GC within + * the specified observation window, say 1 minute. The user can provide a + * hook which will be called whenever this percentage exceeds the specified + * threshold. + */ +public class GcTimeMonitor extends Thread { + + private final long maxGcTimePercentage; + private final long observationWindowNanos, sleepIntervalMs; + private final GcTimeAlertHandler alertHandler; + + // Ring buffers containing GC timings and timestamps when timings were taken + private final TsAndData[] gcDataBuf; + private int bufSize, startIdx, endIdx; + + private long startTimeNanos; + private final GcData curData = new GcData(); + private volatile boolean shouldRun = true; + + /** + * Create an instance of GCTimeMonitor. Once it's started, it will stay alive + * and monitor GC time percentage until shutdown() is called. 
If you don't + * put a limit on the number of GCTimeMonitor instances that you create, and + * alertHandler != null, you should necessarily call shutdown() once the given + * instance is not needed. Otherwise, you may create a memory leak, because + * each running GCTimeMonitor will keep its alertHandler object in memory, + * which in turn may reference and keep in memory many more other objects. + * + * @param observationWindowMs the interval over which the percentage + * of GC time should be calculated. A practical value would be somewhere + * between 30 sec and several minutes. + * @param sleepIntervalMs how frequently this thread should
hive git commit: HIVE-20603: "Wrong FS" error when inserting to partition after changing table location filesystem (Jason Dere, reviewed by Sergey Shelukhin)
Repository: hive Updated Branches: refs/heads/master a81f53ac1 -> 4137c212c HIVE-20603: "Wrong FS" error when inserting to partition after changing table location filesystem (Jason Dere, reviewed by Sergey Shelukhin) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4137c212 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4137c212 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4137c212 Branch: refs/heads/master Commit: 4137c212ccbb1955ffba9c4f6e1b9876b07c122b Parents: a81f53a Author: Jason Dere Authored: Tue Sep 25 13:21:42 2018 -0700 Committer: Jason Dere Committed: Tue Sep 25 13:21:42 2018 -0700 -- .../test/resources/testconfiguration.properties | 2 + .../apache/hadoop/hive/ql/metadata/Hive.java| 2 +- .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 21 - .../clientpositive/alter_table_location2.q | 21 + .../clientpositive/alter_table_location3.q | 16 .../llap/alter_table_location2.q.out| 97 .../llap/alter_table_location3.q.out| 95 +++ .../hive/metastore/utils/MetaStoreUtils.java| 2 +- 8 files changed, 250 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/4137c212/itests/src/test/resources/testconfiguration.properties -- diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index df89748..def3561 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -374,6 +374,8 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\ vectorized_timestamp_ints_casts.q minillap.query.files=acid_bucket_pruning.q,\ + alter_table_location2.q,\ + alter_table_location3.q,\ bucket5.q,\ bucket6.q,\ dynamic_semijoin_user_level.q,\ http://git-wip-us.apache.org/repos/asf/hive/blob/4137c212/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 76541de..3c32de9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1942,7 +1942,7 @@ public class Hive { replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(), isSrcLocal, isAutoPurge, newFiles, FileUtils.HIDDEN_FILES_PATH_FILTER, needRecycle, isManaged); } else { - FileSystem fs = tbl.getDataLocation().getFileSystem(conf); + FileSystem fs = destPath.getFileSystem(conf); copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation, (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles, tbl.getNumBuckets() > 0, isFullAcidTable, isManaged); http://git-wip-us.apache.org/repos/asf/hive/blob/4137c212/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 3873282..344e9fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -7344,10 +7344,23 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { checkImmutableTable(qb, destinationTable, partPath, true); - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. - destinationPath = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); + // Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs. + // The changes in HIVE-19891 appears to no longer support that behavior. 
+ destinationPath = partPath; + + if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) { +try { + String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull( + db, destinationTable, destinationPartition.getSpec()); + String message = String.format("Insert conflict with existing archive: %s", + conflictingArchive); + throw new SemanticException(message); +} catch (SemanticException err) { + throw err; +} catch (HiveException err) { + throw new SemanticException(err); +} + } isMmTable =
hive git commit: HIVE-20430: CachedStore: bug fixes for TestEmbeddedHiveMetaStore, TestRemoteHiveMetaStore, TestMiniLlapCliDriver, TestMiniTezCliDriver, TestMinimrCliDriver (Vaibhav Gumashta reviewed by Daniel Dai)
Repository: hive Updated Branches: refs/heads/master 6137ee5dd -> a81f53ac1 HIVE-20430: CachedStore: bug fixes for TestEmbeddedHiveMetaStore, TestRemoteHiveMetaStore, TestMiniLlapCliDriver, TestMiniTezCliDriver, TestMinimrCliDriver (Vaibhav Gumashta reviewed by Daniel Dai) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a81f53ac Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a81f53ac Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a81f53ac Branch: refs/heads/master Commit: a81f53ac17f4a5f0fd68fbdabe7a038b3612fd80 Parents: 6137ee5 Author: Vaibhav Gumashta Authored: Tue Sep 25 12:04:39 2018 -0700 Committer: Vaibhav Gumashta Committed: Tue Sep 25 12:11:01 2018 -0700 -- .../hadoop/hive/ql/exec/TestOperators.java | 1 + .../hive/metastore/cache/CachedStore.java | 130 +++ .../hive/metastore/cache/SharedCache.java | 8 +- 3 files changed, 81 insertions(+), 58 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/a81f53ac/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java -- diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index abf7198..c7cd4ad 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -394,6 +394,7 @@ public class TestOperators extends TestCase { // ensure that both of the partitions are in the complete list. 
String[] dirs = job.get("hive.complete.dir.list").split("\t"); assertEquals(2, dirs.length); + Arrays.sort(dirs); assertEquals(true, dirs[0].endsWith("/state=CA")); assertEquals(true, dirs[1].endsWith("/state=OR")); return super.getSplits(job, splits); http://git-wip-us.apache.org/repos/asf/hive/blob/a81f53ac/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java -- diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 0445cbf..b9a5458 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -853,9 +853,7 @@ public class CachedStore implements RawStore, Configurable { } @Override - public Table getTable(String catName, String dbName, String tblName, -String validWriteIds) - throws MetaException { + public Table getTable(String catName, String dbName, String tblName, String validWriteIds) throws MetaException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -872,12 +870,28 @@ public class CachedStore implements RawStore, Configurable { return rawStore.getTable(catName, dbName, tblName, validWriteIds); } if (validWriteIds != null) { - tbl.setParameters(adjustStatsParamsForGet(tbl.getParameters(), - tbl.getParameters(), tbl.getWriteId(), validWriteIds)); + tbl.setParameters( + adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds)); } tbl.unsetPrivileges(); tbl.setRewriteEnabled(tbl.isRewriteEnabled()); +if (tbl.getPartitionKeys() == null) { + // getTable call from ObjectStore returns an empty list + 
tbl.setPartitionKeys(new ArrayList<>()); +} +String tableType = tbl.getTableType(); +if (tableType == null) { + // for backwards compatibility with old metastore persistence + if (tbl.getViewOriginalText() != null) { +tableType = TableType.VIRTUAL_VIEW.toString(); + } else if ("TRUE".equals(tbl.getParameters().get("EXTERNAL"))) { +tableType = TableType.EXTERNAL_TABLE.toString(); + } else { +tableType = TableType.MANAGED_TABLE.toString(); + } +} +tbl.setTableType(tableType); return tbl; } @@ -1133,6 +1147,10 @@ public class CachedStore implements RawStore, Configurable { if (!isCachePrewarmed.get() || missSomeInCache) { return rawStore.getTableObjectsByName(catName, dbName, tblNames); } +Database db = sharedCache.getDatabaseFromCache(catName, dbName); +if (db == null) { + throw new UnknownDBException("Could not find database " + dbName); +} List tables
hive git commit: HIVE-20607: TxnHandler should use PreparedStatement to execute direct SQL queries (Sankar Hariappan, reviewed by Daniel Dai)
Repository: hive Updated Branches: refs/heads/master 307bbca96 -> 6137ee5dd HIVE-20607: TxnHandler should use PreparedStatement to execute direct SQL queries (Sankar Hariappan, reviewed by Daniel Dai) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6137ee5d Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6137ee5d Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6137ee5d Branch: refs/heads/master Commit: 6137ee5dd23f6a0317489248bfd2a2a97d20a04c Parents: 307bbca Author: Sankar Hariappan Authored: Tue Sep 25 23:56:51 2018 +0530 Committer: Sankar Hariappan Committed: Tue Sep 25 23:56:51 2018 +0530 -- .../listener/DbNotificationListener.java| 44 +- .../hive/ql/lockmgr/TestDbTxnManager.java | 4 +- .../hive/metastore/tools/SQLGenerator.java | 110 +++- .../hadoop/hive/metastore/txn/TxnHandler.java | 575 --- 4 files changed, 491 insertions(+), 242 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/6137ee5d/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java -- diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index 369d9a4..b287d43 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -23,7 +23,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -748,11 +748,14 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener String s = 
sqlGenerator.addForUpdateClause("select \"WNL_FILES\", \"WNL_ID\" from" + " \"TXN_WRITE_NOTIFICATION_LOG\" " + - "where \"WNL_DATABASE\" = " + quoteString(dbName) + - "and \"WNL_TABLE\" = " + quoteString(tblName) + " and \"WNL_PARTITION\" = " + - quoteString(partition) + " and \"WNL_TXNID\" = " + Long.toString(acidWriteEvent.getTxnId())); - LOG.debug("Going to execute query <" + s + ">"); - rs = stmt.executeQuery(s); + "where \"WNL_DATABASE\" = ? " + + "and \"WNL_TABLE\" = ? " + " and \"WNL_PARTITION\" = ? " + + "and \"WNL_TXNID\" = " + Long.toString(acidWriteEvent.getTxnId())); + List params = Arrays.asList(dbName, tblName, partition); + pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); + LOG.debug("Going to execute query <" + s.replaceAll("\\?", "{}") + ">", + quoteString(dbName), quoteString(tblName), quoteString(partition)); + rs = pst.executeQuery(); if (!rs.next()) { // if rs is empty then no lock is taken and thus it can not cause deadlock. long nextNLId = getNextNLId(stmt, sqlGenerator, @@ -761,6 +764,7 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener "(\"WNL_ID\", \"WNL_TXNID\", \"WNL_WRITEID\", \"WNL_DATABASE\", \"WNL_TABLE\", " + "\"WNL_PARTITION\", \"WNL_TABLE_OBJ\", \"WNL_PARTITION_OBJ\", " + "\"WNL_FILES\", \"WNL_EVENT_TIME\") VALUES (?,?,?,?,?,?,?,?,?,?)"; +closeStmt(pst); int currentTime = now(); pst = dbConn.prepareStatement(sqlGenerator.addEscapeCharacters(s)); pst.setLong(1, nextNLId); @@ -793,6 +797,7 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener " \"WNL_FILES\" = ? ," + " \"WNL_EVENT_TIME\" = ?" 
+ " where \"WNL_ID\" = ?"; +closeStmt(pst); pst = dbConn.prepareStatement(sqlGenerator.addEscapeCharacters(s)); pst.setString(1, tableObj); pst.setString(2, partitionObj); @@ -826,6 +831,7 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener return; } Statement stmt = null; +PreparedStatement pst = null; ResultSet rs = null; try { stmt = dbConn.createStatement(); @@ -852,21 +858,20 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener long nextNLId = getNextNLId(stmt, sqlGenerator, "org.apache.hadoop.hive.metastore.model.MNotificationLog"); - List insert = new ArrayList<>(); +
hive git commit: HIVE-20599: CAST(INTERVAL_DAY_TIME AS STRING) is throwing SemanticException (Naresh P R reviewed by Prasanth Jayachandran)
Repository: hive Updated Branches: refs/heads/master dab8cc012 -> 307bbca96 HIVE-20599: CAST(INTERVAL_DAY_TIME AS STRING) is throwing SemanticException (Naresh P R reviewed by Prasanth Jayachandran) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/307bbca9 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/307bbca9 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/307bbca9 Branch: refs/heads/master Commit: 307bbca96c8bc845183db3608e5ef508a17a8bf4 Parents: dab8cc0 Author: nareshpr Authored: Tue Sep 25 10:12:28 2018 -0700 Committer: Prasanth Jayachandran Committed: Tue Sep 25 10:14:26 2018 -0700 -- .../test/queries/clientpositive/udf_to_string.q | 2 ++ .../results/clientpositive/udf_to_string.q.out| 18 ++ 2 files changed, 20 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hive/blob/307bbca9/ql/src/test/queries/clientpositive/udf_to_string.q -- diff --git a/ql/src/test/queries/clientpositive/udf_to_string.q b/ql/src/test/queries/clientpositive/udf_to_string.q index 818f80f..4bb6220 100644 --- a/ql/src/test/queries/clientpositive/udf_to_string.q +++ b/ql/src/test/queries/clientpositive/udf_to_string.q @@ -17,3 +17,5 @@ SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS STRING) FROM src tablesample (1 rows) SELECT CAST('Foo' AS STRING) FROM src tablesample (1 rows); +SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING); +SELECT CAST(interval_year_month('1-2') AS STRING); http://git-wip-us.apache.org/repos/asf/hive/blob/307bbca9/ql/src/test/results/clientpositive/udf_to_string.q.out -- diff --git a/ql/src/test/results/clientpositive/udf_to_string.q.out b/ql/src/test/results/clientpositive/udf_to_string.q.out index bf2f72d..fc888a1 100644 --- a/ql/src/test/results/clientpositive/udf_to_string.q.out +++ b/ql/src/test/results/clientpositive/udf_to_string.q.out @@ -88,3 +88,21 @@ POSTHOOK: type: QUERY POSTHOOK: 
Input: default@src A masked pattern was here Foo +PREHOOK: query: SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +POSTHOOK: query: SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +17624 00:30:30.0 +PREHOOK: query: SELECT CAST(interval_year_month('1-2') AS STRING) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +POSTHOOK: query: SELECT CAST(interval_year_month('1-2') AS STRING) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +1-2
hive git commit: HIVE-20599: CAST(INTERVAL_DAY_TIME AS STRING) is throwing SemanticException (Naresh P R reviewed by Prasanth Jayachandran)
Repository: hive Updated Branches: refs/heads/branch-3 a879b9f13 -> fdc12f38f HIVE-20599: CAST(INTERVAL_DAY_TIME AS STRING) is throwing SemanticException (Naresh P R reviewed by Prasanth Jayachandran) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fdc12f38 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fdc12f38 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fdc12f38 Branch: refs/heads/branch-3 Commit: fdc12f38f8d89e8c62308683c6b9cabe5dad57ff Parents: a879b9f Author: nareshpr Authored: Tue Sep 25 10:15:22 2018 -0700 Committer: Prasanth Jayachandran Committed: Tue Sep 25 10:15:30 2018 -0700 -- .../apache/hadoop/hive/ql/udf/UDFToString.java| 18 ++ .../test/queries/clientpositive/udf_to_string.q | 3 +++ .../results/clientpositive/udf_to_string.q.out| 18 ++ 3 files changed, 39 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hive/blob/fdc12f38/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java index a16d429..2d49f21 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DateWritableV2; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable; +import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable; import org.apache.hadoop.hive.serde2.io.TimestampWritableV2; @@ -178,4 +180,20 @@ public class UDFToString extends UDF { t.set(bw.getBytes(),0,bw.getLength()); return t; } + + public Text 
evaluate(HiveIntervalDayTimeWritable hiw) { +if (null == hiw) { + return null; +} +t.set(hiw.toString()); +return t; + } + + public Text evaluate(HiveIntervalYearMonthWritable hiw) { +if (null == hiw) { + return null; +} +t.set(hiw.toString()); +return t; + } } http://git-wip-us.apache.org/repos/asf/hive/blob/fdc12f38/ql/src/test/queries/clientpositive/udf_to_string.q -- diff --git a/ql/src/test/queries/clientpositive/udf_to_string.q b/ql/src/test/queries/clientpositive/udf_to_string.q index ac4b524..50186b8 100644 --- a/ql/src/test/queries/clientpositive/udf_to_string.q +++ b/ql/src/test/queries/clientpositive/udf_to_string.q @@ -16,3 +16,6 @@ SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS STRING) FROM src tablesample (1 rows) SELECT CAST('Foo' AS STRING) FROM src tablesample (1 rows); +SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING); +SELECT CAST(interval_year_month('1-2') AS STRING); + http://git-wip-us.apache.org/repos/asf/hive/blob/fdc12f38/ql/src/test/results/clientpositive/udf_to_string.q.out -- diff --git a/ql/src/test/results/clientpositive/udf_to_string.q.out b/ql/src/test/results/clientpositive/udf_to_string.q.out index bf2f72d..fc888a1 100644 --- a/ql/src/test/results/clientpositive/udf_to_string.q.out +++ b/ql/src/test/results/clientpositive/udf_to_string.q.out @@ -88,3 +88,21 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src A masked pattern was here Foo +PREHOOK: query: SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +POSTHOOK: query: SELECT CAST(from_utc_timestamp(timestamp '2018-05-02 15:30:30', 'PST') - from_utc_timestamp(timestamp '1970-01-30 16:00:00', 'PST') AS STRING) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table + A masked pattern was 
here +17624 00:30:30.0 +PREHOOK: query: SELECT CAST(interval_year_month('1-2') AS STRING) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +POSTHOOK: query: SELECT CAST(interval_year_month('1-2') AS STRING) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table + A masked pattern was here +1-2
hive git commit: HIVE-18871: hive on tez execution error due to set hive.aux.jars.path to hdfs:// (zhuwei reviewed by Prasanth Jayachandran)
Repository: hive Updated Branches: refs/heads/branch-3 491c9f621 -> a879b9f13 HIVE-18871: hive on tez execution error due to set hive.aux.jars.path to hdfs:// (zhuwei reviewed by Prasanth Jayachandran) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a879b9f1 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a879b9f1 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a879b9f1 Branch: refs/heads/branch-3 Commit: a879b9f13e2b8815224dcc7c7514eef5cac7aaa1 Parents: 491c9f6 Author: zhuwei Authored: Mon Sep 24 10:24:27 2018 -0700 Committer: Prasanth Jayachandran Committed: Tue Sep 25 10:10:00 2018 -0700 -- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/a879b9f1/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index de0abd1..1a88b77 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -62,6 +62,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -1263,7 +1264,12 @@ public class DagUtils { return createLocalResource(destFS, dest, type, LocalResourceVisibility.PRIVATE); } try { -destFS.copyFromLocalFile(false, false, src, dest); +if (src.toUri().getScheme()!=null) { + FileUtil.copy(src.getFileSystem(conf), src, destFS, dest, false, false, conf); +} +else { + destFS.copyFromLocalFile(false, false, src, dest); +} synchronized (notifier) { 
notifier.notifyAll(); // Notify if we have successfully copied the file. }
hive git commit: HIVE-18871: hive on tez execution error due to set hive.aux.jars.path to hdfs:// (zhuwei reviewed by Prasanth Jayachandran)
Repository: hive Updated Branches: refs/heads/master ffdbee050 -> dab8cc012 HIVE-18871: hive on tez execution error due to set hive.aux.jars.path to hdfs:// (zhuwei reviewed by Prasanth Jayachandran) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dab8cc01 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dab8cc01 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dab8cc01 Branch: refs/heads/master Commit: dab8cc01297bc82e17d3e666a49e3b6392f878b4 Parents: ffdbee0 Author: zhuwei Authored: Mon Sep 24 10:24:27 2018 -0700 Committer: Prasanth Jayachandran Committed: Tue Sep 25 10:09:41 2018 -0700 -- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/dab8cc01/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index de0abd1..1a88b77 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -62,6 +62,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -1263,7 +1264,12 @@ public class DagUtils { return createLocalResource(destFS, dest, type, LocalResourceVisibility.PRIVATE); } try { -destFS.copyFromLocalFile(false, false, src, dest); +if (src.toUri().getScheme()!=null) { + FileUtil.copy(src.getFileSystem(conf), src, destFS, dest, false, false, conf); +} +else { + destFS.copyFromLocalFile(false, false, src, dest); +} synchronized (notifier) { 
notifier.notifyAll(); // Notify if we have successfully copied the file. }
hive git commit: HIVE-20601 : EnvironmentContext null in ALTER_PARTITION event in DbNotificationListener (Bharath Krishna, reviewed by Andrew Sherman)
Repository: hive Updated Branches: refs/heads/master e161b0113 -> ffdbee050 HIVE-20601 : EnvironmentContext null in ALTER_PARTITION event in DbNotificationListener (Bharath Krishna, reviewed by Andrew Sherman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ffdbee05 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ffdbee05 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ffdbee05 Branch: refs/heads/master Commit: ffdbee0500e08107a7efd5b47df60c7a26751778 Parents: e161b01 Author: Bharath Krishna Authored: Tue Sep 25 09:29:56 2018 -0700 Committer: Andrew Sherman Committed: Tue Sep 25 09:32:55 2018 -0700 -- .../org/apache/hadoop/hive/metastore/HiveAlterHandler.java| 7 +++ 1 file changed, 3 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/ffdbee05/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java -- diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index f52ff91..0ea46f8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -743,10 +743,9 @@ public class HiveAlterHandler implements AlterHandler { } if (transactionalListeners != null && !transactionalListeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventMessage.EventType.ALTER_PARTITION, -new AlterPartitionEvent(oldPart, newPart, tbl, false, -true, newPart.getWriteId(), handler)); + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, + new AlterPartitionEvent(oldPart, newPart, tbl, false, true, 
newPart.getWriteId(), handler), + environmentContext); } }