[
https://issues.apache.org/jira/browse/HIVE-23718?focusedWorklogId=448046&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-448046
]
ASF GitHub Bot logged work on HIVE-23718:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 18/Jun/20 21:20
Start Date: 18/Jun/20 21:20
Worklog Time Spent: 10m
Work Description: miklosgergely commented on a change in pull request
#1142:
URL: https://github.com/apache/hive/pull/1142#discussion_r442508297
##########
File path: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
##########
@@ -308,110 +255,6 @@ public FetchTask getFetchTask() {
return driverContext.getFetchTask();
}
- /**
- * Acquire read and write locks needed by the statement. The list of objects
to be locked are
- * obtained from the inputs and outputs populated by the compiler. Locking
strategy depends on
- * HiveTxnManager and HiveLockManager configured
- *
- * This method also records the list of valid transactions. This must be
done after any
- * transactions have been opened.
- * @throws CommandProcessorException
- **/
- private void acquireLocks() throws CommandProcessorException {
- PerfLogger perfLogger = SessionState.getPerfLogger();
- perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
-
- if(!driverContext.getTxnManager().isTxnOpen() &&
driverContext.getTxnManager().supportsAcid()) {
- /*non acid txn managers don't support txns but fwd lock requests to lock
managers
- acid txn manager requires all locks to be associated with a txn so if
we
- end up here w/o an open txn it's because we are processing something
like "use <database>
- which by definition needs no locks*/
- return;
- }
- try {
- String userFromUGI = DriverUtils.getUserFromUGI(driverContext);
-
- // Set the table write id in all of the acid file sinks
- if (!driverContext.getPlan().getAcidSinks().isEmpty()) {
- List<FileSinkDesc> acidSinks = new
ArrayList<>(driverContext.getPlan().getAcidSinks());
- //sorting makes tests easier to write since file names and ROW__IDs
depend on statementId
- //so this makes (file name -> data) mapping stable
- acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) ->
- fsd1.getDirName().compareTo(fsd2.getDirName()));
- for (FileSinkDesc desc : acidSinks) {
- TableDesc tableInfo = desc.getTableInfo();
- final TableName tn =
HiveTableName.ofNullable(tableInfo.getTableName());
- long writeId =
driverContext.getTxnManager().getTableWriteId(tn.getDb(), tn.getTable());
- desc.setTableWriteId(writeId);
-
- /**
- * it's possible to have > 1 FileSink writing to the same
table/partition
- * e.g. Merge stmt, multi-insert stmt when mixing DP and SP writes
- * Insert ... Select ... Union All Select ... using
- * {@link
org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator#UNION_SUDBIR_PREFIX}
- */
-
desc.setStatementId(driverContext.getTxnManager().getStmtIdAndIncrement());
- String unionAllSubdir = "/" +
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX;
- if(desc.getInsertOverwrite() &&
desc.getDirName().toString().contains(unionAllSubdir) &&
- desc.isFullAcidTable()) {
- throw new UnsupportedOperationException("QueryId=" +
driverContext.getPlan().getQueryId() +
- " is not supported due to OVERWRITE and UNION ALL. Please use
truncate + insert");
- }
- }
- }
-
- if (driverContext.getPlan().getAcidAnalyzeTable() != null) {
- // Allocate write ID for the table being analyzed.
- Table t = driverContext.getPlan().getAcidAnalyzeTable().getTable();
- driverContext.getTxnManager().getTableWriteId(t.getDbName(),
t.getTableName());
- }
-
-
- DDLDescWithWriteId acidDdlDesc =
driverContext.getPlan().getAcidDdlDesc();
- boolean hasAcidDdl = acidDdlDesc != null && acidDdlDesc.mayNeedWriteId();
- if (hasAcidDdl) {
- String fqTableName = acidDdlDesc.getFullTableName();
- final TableName tn =
HiveTableName.ofNullableWithNoDefault(fqTableName);
- long writeId =
driverContext.getTxnManager().getTableWriteId(tn.getDb(), tn.getTable());
- acidDdlDesc.setWriteId(writeId);
- }
-
- /*It's imperative that {@code acquireLocks()} is called for all commands
so that
- HiveTxnManager can transition its state machine correctly*/
- driverContext.getTxnManager().acquireLocks(driverContext.getPlan(),
context, userFromUGI, driverState);
- final List<HiveLock> locks = context.getHiveLocks();
- LOG.info("Operation {} obtained {} locks",
driverContext.getPlan().getOperation(),
- ((locks == null) ? 0 : locks.size()));
- // This check is for controlling the correctness of the current state
- if
(driverContext.getTxnManager().recordSnapshot(driverContext.getPlan()) &&
- !driverContext.isValidTxnListsGenerated()) {
- throw new IllegalStateException(
- "Need to record valid WriteID list but there is no valid TxnID
list (" +
-
JavaUtils.txnIdToString(driverContext.getTxnManager().getCurrentTxnId()) +
- ", queryId:" + driverContext.getPlan().getQueryId() + ")");
- }
-
- if (driverContext.getPlan().hasAcidResourcesInQuery() || hasAcidDdl) {
- validTxnManager.recordValidWriteIds();
- }
-
- } catch (Exception e) {
- String errorMessage;
- if (driverState.isDestroyed() || driverState.isAborted() ||
driverState.isClosed()) {
- errorMessage = String.format("Ignore lock acquisition related
exception in terminal state (%s): %s",
- driverState.toString(), e.getMessage());
- CONSOLE.printInfo(errorMessage);
- } else {
- errorMessage = String.format("FAILED: Error in acquiring locks: %s",
e.getMessage());
- CONSOLE.printError(errorMessage, "\n" +
StringUtils.stringifyException(e));
- }
- throw DriverUtils.createProcessorException(driverContext, 10,
errorMessage, ErrorMsg.findSQLState(e.getMessage()),
- e);
- } finally {
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
- }
- }
-
public void releaseLocksAndCommitOrRollback(boolean commit) throws
LockException {
releaseLocksAndCommitOrRollback(commit, driverContext.getTxnManager());
}
Review comment:
   I'm planning to remove both releaseLocksAndCommitOrRollback functions
from Driver, but unfortunately they are both still used by some external
callers. I'll try to remove those in a future patch; for now, I've removed
all references to them within the Driver class itself.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
Issue Time Tracking
-------------------
Worklog Id: (was: 448046)
Time Spent: 1h 50m (was: 1h 40m)
> Extract transaction handling from Driver
> ----------------------------------------
>
> Key: HIVE-23718
> URL: https://issues.apache.org/jira/browse/HIVE-23718
> Project: Hive
> Issue Type: Sub-task
> Components: HiveServer2
> Reporter: Miklos Gergely
> Assignee: Miklos Gergely
> Priority: Major
> Labels: pull-request-available
> Time Spent: 1h 50m
> Remaining Estimate: 0h
>
--
This message was sent by Atlassian Jira
(v8.3.4#803005)