This is an automated email from the ASF dual-hosted git repository.
gsaihemanth pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 2e323679832 HIVE-27268: synthesize empty id lists if no session available (#4303) (Henri Biestro, reviewed by Sai Hemanth G)
2e323679832 is described below
commit 2e3236798322e165de7a2ab42820e87ad5ce3cfb
Author: Henrib <[email protected]>
AuthorDate: Tue May 9 21:48:28 2023 +0200
HIVE-27268: synthesize empty id lists if no session available (#4303) (Henri Biestro, reviewed by Sai Hemanth G)
---
ql/src/java/org/apache/hadoop/hive/ql/Context.java | 7 +++++-
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 7 +++++-
.../apache/hadoop/hive/ql/TestTxnNoBuckets.java | 27 ++++++++++++++++++++++
3 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index f21ac7efaad..94e8e0edc2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.BlobStorageUtils;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.cleanup.CleanupService;
+import org.apache.hadoop.hive.ql.cleanup.SyncCleanupService;
import org.apache.hadoop.hive.ql.exec.TaskRunner;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -741,7 +743,10 @@ public class Context {
// because that will be taken care by removeResultCacheDir
FileSystem fs = p.getFileSystem(conf);
LOG.info("Deleting scratch dir: {}", p);
- sessionState.getCleanupService().deleteRecursive(p, fs);
+ CleanupService cleanupService = sessionState != null
+ ? sessionState.getCleanupService()
+ : SyncCleanupService.INSTANCE;
+ cleanupService.deleteRecursive(p, fs);
}
} catch (Exception e) {
LOG.warn("Error Removing Scratch", e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d26b65ff350..f6dbf3c7929 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1773,9 +1773,14 @@ public class Hive {
*/
private ValidWriteIdList getValidWriteIdList(String dbName, String tableName) throws LockException {
ValidWriteIdList validWriteIdList = null;
- long txnId = SessionState.get() != null && SessionState.get().getTxnMgr() != null ? SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
+ SessionState sessionState = SessionState.get();
+ HiveTxnManager txnMgr = sessionState != null? sessionState.getTxnMgr() : null;
+ long txnId = txnMgr != null ? txnMgr.getCurrentTxnId() : 0;
if (txnId > 0) {
validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName);
+ } else {
+ String fullTableName = getFullTableName(dbName, tableName);
+ validWriteIdList = new ValidReaderWriteIdList(fullTableName, new long[0], new BitSet(), Long.MAX_VALUE);
}
return validWriteIdList;
}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
index fa43d2a39f5..f8ee483053f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.ql.io.BucketCodec;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
@@ -909,5 +910,31 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
Assert.assertEquals(stringifyValues(data), rs);
}
+
+ /**
+ * HIVE-27268
+ */
+ @Test
+ public void testGetPartitionsNoSession() throws Exception {
+ hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1);
+ runStatementOnDriver("drop table if exists T");
+ runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " +
+ "stored as orc TBLPROPERTIES ('transactional'='true')");
+
+ int[][] targetVals = {{4, 1, 1}, {4, 2, 2}, {4, 3, 1}, {4, 4, 2}};
+ //we only recompute stats after major compact if they existed before
+ runStatementOnDriver("insert into T partition(p=1,q) " + makeValuesClause(targetVals));
+ runStatementOnDriver("analyze table T partition(p=1) compute statistics for columns");
+
+ Hive hive = Hive.get();
+ org.apache.hadoop.hive.ql.metadata.Table hiveTable = hive.getTable("T");
+ // this will ensure the getValidWriteIdList has no session to work with (thru getPartitions)
+ SessionState.detachSession();
+ List<org.apache.hadoop.hive.ql.metadata.Partition> partitions = hive.getPartitions(hiveTable);
+ Assert.assertNotNull(partitions);
+ // prevent tear down failure
+ d.close();
+ d = null;
+ }
}