Repository: hive
Updated Branches:
  refs/heads/master 00a8e1a13 -> ab33a7b7d


HIVE-18575 : ACID properties usage in jobconf is ambiguous for MM tables 
(Sergey Shelukhin, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab33a7b7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab33a7b7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab33a7b7

Branch: refs/heads/master
Commit: ab33a7b7decb03ef378b00c11b813b12e66f7be7
Parents: 00a8e1a
Author: sergey <ser...@apache.org>
Authored: Mon Feb 12 11:26:52 2018 -0800
Committer: sergey <ser...@apache.org>
Committed: Mon Feb 12 11:26:52 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  5 +-
 .../mapreduce/FosterStorageHandler.java         |  6 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java   |  2 +-
 .../streaming/mutate/client/lock/Lock.java      |  4 +-
 .../hive/hcatalog/streaming/TestStreaming.java  |  2 +-
 .../streaming/mutate/StreamingAssert.java       |  2 +-
 .../hive/ql/txn/compactor/TestCompactor.java    |  2 +-
 .../hive/llap/io/api/impl/LlapRecordReader.java |  3 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |  2 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  2 +-
 .../apache/hadoop/hive/ql/exec/FetchTask.java   |  4 +-
 .../hadoop/hive/ql/exec/SMBMapJoinOperator.java |  4 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |  4 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 80 ++++++++++----------
 .../hadoop/hive/ql/io/HiveInputFormat.java      |  9 ++-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 17 ++---
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  | 14 ++--
 .../io/orc/VectorizedOrcAcidRowBatchReader.java |  2 +-
 .../ql/io/orc/VectorizedOrcInputFormat.java     |  3 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    | 10 +--
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  4 +-
 .../BucketingSortingReduceSinkOptimizer.java    |  2 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |  8 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  | 15 ++--
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 12 +--
 .../hive/ql/parse/repl/dump/TableExport.java    |  4 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      | 18 +++--
 .../apache/hadoop/hive/ql/stats/Partish.java    |  2 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |  5 +-
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java |  4 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 13 ++--
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  | 18 +++--
 .../TestVectorizedOrcAcidRowBatchReader.java    |  2 +-
 .../hive/metastore/LockComponentBuilder.java    |  2 +-
 35 files changed, 151 insertions(+), 137 deletions(-)
----------------------------------------------------------------------
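
In short: the old hive.acid.table.scan flag and the ACID operational properties were pushed into the JobConf by two unrelated calls, so an insert-only (MM) table could end up looking like a full ACID scan. This patch replaces both calls with a single AcidUtils entry point and renames the setting. A minimal before/after sketch (hypothetical harness class; the new signature is taken from the AcidUtils.java hunk below):

    import org.apache.hadoop.hive.ql.io.AcidUtils;
    import org.apache.hadoop.mapred.JobConf;

    public class AcidConfSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Old, ambiguous pattern (removed by this patch): two knobs set by
        // unrelated calls that could disagree for MM tables.
        //   AcidUtils.setAcidTableScan(job, true);
        //   AcidUtils.setAcidOperationalProperties(job, props);
        // New pattern: the transactional flag and the properties travel
        // together; null properties means "assume a full transactional table".
        AcidUtils.setAcidOperationalProperties(job, true, null);
      }
    }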


http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 67e22f6..adb9b9b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1337,8 +1337,9 @@ public class HiveConf extends Configuration {
     HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
         "Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
 
-    HIVE_ACID_TABLE_SCAN("hive.acid.table.scan", false,
-        "internal usage only -- do transaction (ACID) table scan.", true),
+    /** Don't use this directly - use AcidUtils! */
+    HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
+        "internal usage only -- do transaction (ACID or insert-only) table scan.", true),
 
     HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000,
         "Vectorized ACID readers can often load all the delete events from all the delete deltas\n"
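
Per the new comment above, consumers should not read hive.transactional.table.scan directly; whether a scan is full ACID is decided in AcidUtils. A hedged sketch of the intended read path (ScanFlagSketch is a hypothetical harness; isFullAcidScan is defined in the AcidUtils.java hunk further down):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class ScanFlagSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // True only when the scan is transactional AND its operational
        // properties are not insert-only; false here since nothing is set.
        System.out.println(AcidUtils.isFullAcidScan(conf));
      }
    }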

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
index 5ee8aad..195eaa3 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
@@ -134,10 +134,8 @@ public class FosterStorageHandler extends DefaultStorageHandler {
         boolean isTransactionalTable = AcidUtils.isTablePropertyTransactional(tableProperties);
         AcidUtils.AcidOperationalProperties acidOperationalProperties =
                 AcidUtils.getAcidOperationalProperties(tableProperties);
-        if(acidOperationalProperties.isSplitUpdate()) {
-          AcidUtils.setAcidTableScan(jobProperties, isTransactionalTable);
-        }
-        AcidUtils.setAcidOperationalProperties(jobProperties, acidOperationalProperties);
+        AcidUtils.setAcidOperationalProperties(
+            jobProperties, isTransactionalTable, acidOperationalProperties);
       }
     } catch (IOException e) {
       throw new IllegalStateException("Failed to set output path", e);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 3388a34..6793d09 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -341,7 +341,7 @@ public class HiveEndPoint {
         throw new InvalidTable(endPoint.database, endPoint.table, e);
       }
       // 1 - check that the table is Acid
-      if (!AcidUtils.isAcidTable(t)) {
+      if (!AcidUtils.isFullAcidTable(t)) {
         LOG.error("HiveEndPoint " + endPoint + " must use an acid table");
        throw new InvalidTable(endPoint.database, endPoint.table, "is not an Acid table");
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
index c272837..429555f 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
@@ -182,9 +182,9 @@ public class Lock {
       //todo: DataOperationType is set conservatively here, we'd really want to distinguish update/delete
       //and insert/select and if resource (that is written to) is ACID or not
       if (sinks.contains(table)) {
-        componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsAcid(true);
+        componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsFullAcid(true);
       } else {
-        componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsAcid(true);
+        componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsFullAcid(true);
       }
       }
       LockComponent component = componentBuilder.build();
       requestBuilder.addLockComponent(component);
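
The setIsAcid -> setIsFullAcid rename mirrors the new terminology; the LockComponentBuilder change itself is listed in the diffstat above but not shown in this excerpt. A hedged usage sketch with the renamed setter (the db/table names and the setDbName/setTableName calls are assumptions; the remaining builder methods appear in the hunk above):

    import org.apache.hadoop.hive.metastore.LockComponentBuilder;
    import org.apache.hadoop.hive.metastore.api.DataOperationType;
    import org.apache.hadoop.hive.metastore.api.LockComponent;

    public class LockSketch {
      public static void main(String[] args) {
        LockComponent component = new LockComponentBuilder()
            .setDbName("default")        // placeholder
            .setTableName("t")           // placeholder
            .setShared()
            .setOperationType(DataOperationType.INSERT)
            .setIsFullAcid(true)         // renamed from setIsAcid
            .build();
        System.out.println(component);
      }
    }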

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 4e92812..da2ca72 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -571,7 +571,7 @@ public class TestStreaming {
     job.set(BUCKET_COUNT, Integer.toString(buckets));
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
     InputSplit[] splits = inf.getSplits(job, buckets);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
index c98d22b..873cddf 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
@@ -143,7 +143,7 @@ public class StreamingAssert {
    job.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(table.getSd().getNumBuckets()));
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
     InputSplit[] splits = inputFormat.getSplits(job, 1);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index a5e6293..6dd7305 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -1385,7 +1385,7 @@ public class TestCompactor {
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty);
    conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets));
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
     AcidInputFormat.RawReader<OrcStruct> reader =
         aif.getRawReader(conf, true, bucket, txnList, base, deltas);
     RecordIdentifier identifier = reader.createKey();

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index d252279..a69c9a0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.tez.DagUtils;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcSplit;
 import org.apache.hadoop.hive.ql.io.orc.VectorizedOrcAcidRowBatchReader;
@@ -160,7 +161,7 @@ class LlapRecordReader
       }
     }
 
-    isAcidScan = HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN);
+    isAcidScan = AcidUtils.isFullAcidScan(jobConf);
     TypeDescription schema = OrcInputFormat.getDesiredRowTypeDescr(
         job, isAcidScan, Integer.MAX_VALUE);
     if (isAcidScan) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 68bb168..a6d2a04 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -231,7 +231,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
       readerSchema = fileMetadata.getSchema();
     }
     readerIncludes = OrcInputFormat.genIncludedColumns(readerSchema, includedColumnIds);
-    if (HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN)) {
+    if (AcidUtils.isFullAcidScan(jobConf)) {
       fileIncludes = OrcInputFormat.shiftReaderIncludedForAcid(readerIncludes);
     } else {
      fileIncludes = OrcInputFormat.genIncludedColumns(fileSchema, includedColumnIds);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 20c2c32..227f6ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2189,7 +2189,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
 
     Table tbl = db.getTable(desc.getTableName());
-    if (!AcidUtils.isAcidTable(tbl) && !AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
+    if (!AcidUtils.isTransactionalTable(tbl)) {
      throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(),
           tbl.getTableName());
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
index 090a188..ada4aba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
@@ -78,8 +78,8 @@ public class FetchTask extends Task<FetchWork> implements Serializable {
         // push down filters
         HiveInputFormat.pushFilters(job, ts, null);
 
-        AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable());
-        AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties());
+        AcidUtils.setAcidOperationalProperties(job, ts.getConf().isTransactionalTable(),
+            ts.getConf().getAcidOperationalProperties());
       }
       sink = work.getSink();
       fetch = new FetchOperator(work, job, source, getVirtualColumns(source));

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 270b576..4732da4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -209,8 +209,8 @@ public class SMBMapJoinOperator extends AbstractMapJoinOperator<SMBJoinDesc> imp
       // push down filters
       HiveInputFormat.pushFilters(jobClone, ts, null);
 
-      AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable());
-      AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties());
+      AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().isTransactionalTable(),
+          ts.getConf().getAcidOperationalProperties());
 
       ts.passExecContext(getExecContext());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
index abd42ec..30bf534 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
@@ -485,8 +485,8 @@ public class MapredLocalTask extends Task<MapredLocalWork> implements Serializab
       // push down filters
       HiveInputFormat.pushFilters(jobClone, ts, null);
 
-      AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable());
-      AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties());
+      AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().isTransactionalTable(),
+          ts.getConf().getAcidOperationalProperties());
 
       // create a fetch operator
       FetchOperator fetchOp = new FetchOperator(entry.getValue(), jobClone);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 430e0fc..553e8bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1232,19 +1232,8 @@ public class AcidUtils {
     }
     return resultStr != null && resultStr.equalsIgnoreCase("true");
   }
-  /**
-   * Means it's a full acid table
-   */
-  public static void setAcidTableScan(Map<String, String> parameters, boolean isAcidTable) {
-    parameters.put(ConfVars.HIVE_ACID_TABLE_SCAN.varname, Boolean.toString(isAcidTable));
-  }
 
-  /**
-   * Means it's a full acid table
-   */
-  public static void setAcidTableScan(Configuration conf, boolean isFullAcidTable) {
-    HiveConf.setBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN, isFullAcidTable);
-  }
+
   /**
    * @param p - not null
    */
@@ -1252,21 +1241,6 @@ public class AcidUtils {
     return p.getName().startsWith(DELETE_DELTA_PREFIX);
   }
 
-  /**
-   * Should produce the same result as
-   * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table)}
-   */
-  public static boolean isTransactionalTable(Table table) {
-    if (table == null) {
-      return false;
-    }
-    String tableIsTransactional = table.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
-    if (tableIsTransactional == null) {
-      tableIsTransactional = table.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
-    }
-
-    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
-  }
   public static boolean isTransactionalTable(CreateTableDesc table) {
     if (table == null || table.getTblProps() == null) {
       return false;
@@ -1282,20 +1256,29 @@ public class AcidUtils {
    * Should produce the same result as
   * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
    */
-  public static boolean isAcidTable(Table table) {
-    return isAcidTable(table == null ? null : table.getTTable());
+  public static boolean isFullAcidTable(Table table) {
+    return isFullAcidTable(table == null ? null : table.getTTable());
   }
+
+  public static boolean isTransactionalTable(Table table) {
+    return isTransactionalTable(table == null ? null : table.getTTable());
+  }
+
   /**
    * Should produce the same result as
   * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
    */
-  public static boolean isAcidTable(org.apache.hadoop.hive.metastore.api.Table table) {
-    return table != null && table.getParameters() != null &&
-        isTablePropertyTransactional(table.getParameters()) &&
+  public static boolean isFullAcidTable(org.apache.hadoop.hive.metastore.api.Table table) {
+    return isTransactionalTable(table) &&
         !isInsertOnlyTable(table.getParameters());
   }
 
-  public static boolean isAcidTable(CreateTableDesc td) {
+  public static boolean isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table table) {
+    return table != null && table.getParameters() != null &&
+        isTablePropertyTransactional(table.getParameters());
+  }
+
+  public static boolean isFullAcidTable(CreateTableDesc td) {
     if (td == null || td.getTblProps() == null) {
       return false;
     }
@@ -1306,17 +1289,31 @@ public class AcidUtils {
    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true") &&
       !AcidUtils.isInsertOnlyTable(td.getTblProps());
   }
-  
+
+  public static boolean isFullAcidScan(Configuration conf) {
+    if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN)) return false;
+    int propInt = conf.getInt(ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname, -1);
+    if (propInt == -1) return true;
+    AcidOperationalProperties props = AcidOperationalProperties.parseInt(propInt);
+    return !props.isInsertOnly();
+  }
 
   /**
    * Sets the acidOperationalProperties in the configuration object argument.
    * @param conf Mutable configuration object
-   * @param properties An acidOperationalProperties object to initialize from.
+   * @param properties An acidOperationalProperties object to initialize from. If this is null,
+   *                   we assume this is a full transactional table.
    */
-  public static void setAcidOperationalProperties(Configuration conf,
-          AcidOperationalProperties properties) {
-    if (properties != null) {
-      HiveConf.setIntVar(conf, ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES, properties.toInt());
+  public static void setAcidOperationalProperties(
+      Configuration conf, boolean isTxnTable, AcidOperationalProperties properties) {
+    if (isTxnTable) {
+      HiveConf.setBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, isTxnTable);
+      if (properties != null) {
+        HiveConf.setIntVar(conf, ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES, properties.toInt());
+      }
+    } else {
+      conf.unset(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname);
+      conf.unset(ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname);
     }
   }
 
@@ -1325,8 +1322,9 @@ public class AcidUtils {
    * @param parameters Mutable map object
    * @param properties An acidOperationalProperties object to initialize from.
    */
-  public static void setAcidOperationalProperties(
-          Map<String, String> parameters, AcidOperationalProperties properties) {
+  public static void setAcidOperationalProperties(Map<String, String> parameters,
+      boolean isTxnTable, AcidOperationalProperties properties) {
+    parameters.put(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, Boolean.toString(isTxnTable));
     if (properties != null) {
       parameters.put(ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname, properties.toString());
     }
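
Taken together, the reworked predicates form a small hierarchy: a transactional table is either full ACID or insert-only (MM), and a full ACID table is a transactional table whose operational properties are not insert-only. Note also the asymmetry of the two setters above: the Configuration overload actively unsets both keys when isTxnTable is false, while the Map overload always writes the flag. A hedged classification sketch (hypothetical helper; both predicates are defined in the hunks above):

    import org.apache.hadoop.hive.ql.io.AcidUtils;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class TableKindSketch {
      static String classify(Table t) {
        if (!AcidUtils.isTransactionalTable(t)) {
          return "plain table";
        }
        // Transactional but insert-only => MM table; otherwise full ACID.
        return AcidUtils.isFullAcidTable(t) ? "full ACID" : "insert-only (MM)";
      }
    }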

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 856b026..5cd30cb 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -469,8 +469,9 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
 
     try {
       Utilities.copyTablePropertiesToConf(table, conf);
-      if(tableScan != null) {
-        AcidUtils.setAcidTableScan(conf, tableScan.getConf().isAcidTable());
+      if (tableScan != null) {
+        AcidUtils.setAcidOperationalProperties(conf, tableScan.getConf().isTransactionalTable(),
+            tableScan.getConf().getAcidOperationalProperties());
       }
     } catch (HiveException e) {
       throw new IOException(e);
@@ -868,8 +869,8 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
         // push down filters
         pushFilters(jobConf, ts, this.mrwork);
 
-        AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable());
-        AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties());
+        AcidUtils.setAcidOperationalProperties(job, ts.getConf().isTransactionalTable(),
+            ts.getConf().getAcidOperationalProperties());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index ff2cc04..5e29070 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -199,7 +199,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
    * @param inputSplit
    * @return
    */
-  public boolean isAcidRead(Configuration conf, InputSplit inputSplit) {
+  public boolean isFullAcidRead(Configuration conf, InputSplit inputSplit) {
     if (!(inputSplit instanceof OrcSplit)) {
       return false;
     }
@@ -214,7 +214,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     /*
      * Fallback for the case when OrcSplit flags do not contain hasBase and deltas
      */
-    return HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
+    return AcidUtils.isFullAcidScan(conf);
   }
 
   private static class OrcRecordReader
@@ -308,9 +308,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
                                                   Configuration conf,
                                                   long offset, long length
                                                   ) throws IOException {
-
-    boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
-    if (isTransactionalTableScan) {
+    if (AcidUtils.isFullAcidScan(conf)) {
       raiseAcidTablesMustBeReadWithAcidReaderException(conf);
     }
 
@@ -1691,11 +1689,10 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       pathFutures.add(ecs.submit(fileGenerator));
     }
 
-    boolean isTransactionalTableScan =
-        HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
+    boolean isAcidTableScan = AcidUtils.isFullAcidScan(conf);
    boolean isSchemaEvolution = HiveConf.getBoolVar(conf, ConfVars.HIVE_SCHEMA_EVOLUTION);
     TypeDescription readerSchema =
-        OrcInputFormat.getDesiredRowTypeDescr(conf, isTransactionalTableScan, Integer.MAX_VALUE);
+        OrcInputFormat.getDesiredRowTypeDescr(conf, isAcidTableScan, Integer.MAX_VALUE);
     List<OrcProto.Type> readerTypes = null;
     if (readerSchema != null) {
       readerTypes = OrcUtils.getOrcTypes(readerSchema);
@@ -1703,7 +1700,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     if (LOG.isDebugEnabled()) {
       LOG.debug("Generate splits schema evolution property " + isSchemaEvolution +
         " reader schema " + (readerSchema == null ? "NULL" : readerSchema.toString()) +
-        " transactional scan property " + isTransactionalTableScan);
+        " ACID scan property " + isAcidTableScan);
     }
 
     // complete path futures and schedule split generation
@@ -1891,7 +1888,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
                   Reporter reporter) throws IOException {
     //CombineHiveInputFormat may produce FileSplit that is not OrcSplit
     boolean vectorMode = Utilities.getIsVectorized(conf);
-    boolean isAcidRead = isAcidRead(conf, inputSplit);
+    boolean isAcidRead = isFullAcidRead(conf, inputSplit);
     if (!isAcidRead) {
       if (vectorMode) {
         return createVectorizedReader(inputSplit, conf, reporter);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index 61565ef..91d855b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -233,14 +233,16 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
   @Override
   public boolean canUseLlapIo(Configuration conf) {
     final boolean hasDelta = deltas != null && !deltas.isEmpty();
-    final boolean isAcidRead = HiveConf.getBoolVar(conf,
-        HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN);
+    final boolean isAcidRead = AcidUtils.isFullAcidScan(conf);
     final boolean isVectorized = HiveConf.getBoolVar(conf,
         HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
-    final AcidUtils.AcidOperationalProperties acidOperationalProperties
-        = AcidUtils.getAcidOperationalProperties(conf);
-    final boolean isSplitUpdate = acidOperationalProperties.isSplitUpdate();
-    assert isSplitUpdate : "should be true in Hive 3.0";
+    Boolean isSplitUpdate = null;
+    if (isAcidRead) {
+      final AcidUtils.AcidOperationalProperties acidOperationalProperties
+          = AcidUtils.getAcidOperationalProperties(conf);
+      isSplitUpdate = acidOperationalProperties.isSplitUpdate();
+      assert isSplitUpdate : "should be true in Hive 3.0";
+    }
 
     if (isOriginal) {
       if (!isAcidRead && !hasDelta) {
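
The reordering above is the LLAP side of the fix: AcidUtils.getAcidOperationalProperties(conf) falls back to default properties when nothing was set, so consulting it before checking the scan flag could make a non-ACID or MM read trip the split-update assert. A hedged sketch of the guard pattern (hypothetical helper; both AcidUtils calls appear in this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class SplitUpdateGuardSketch {
      // Returns null when the question does not apply (non-ACID scan).
      static Boolean isSplitUpdate(Configuration conf) {
        if (!AcidUtils.isFullAcidScan(conf)) {
          return null;
        }
        return AcidUtils.getAcidOperationalProperties(conf).isSplitUpdate();
      }
    }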

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index da20004..d4b29d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -158,7 +158,7 @@ public class VectorizedOrcAcidRowBatchReader
   private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporter reporter,
       VectorizedRowBatchCtx rowBatchCtx) throws IOException {
     this.rbCtx = rowBatchCtx;
-    final boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
+    final boolean isAcidRead = AcidUtils.isFullAcidScan(conf);
     final AcidUtils.AcidOperationalProperties acidOperationalProperties
             = AcidUtils.getAcidOperationalProperties(conf);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
index 7b157e6..c581bba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
 import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
 import org.apache.hadoop.io.NullWritable;
@@ -64,7 +65,7 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
     VectorizedOrcRecordReader(Reader file, Configuration conf,
         FileSplit fileSplit) throws IOException {
 
-      boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
+      boolean isAcidRead = AcidUtils.isFullAcidScan(conf);
       if (isAcidRead) {
         OrcInputFormat.raiseAcidTablesMustBeReadWithAcidReaderException(conf);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 3968b0e..5bbfe95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -325,14 +325,14 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     //in a txn assuming we can determine the target is a suitable table type.
     if(queryPlan.getOperation() == HiveOperation.LOAD && queryPlan.getOutputs() != null && queryPlan.getOutputs().size() == 1) {
       WriteEntity writeEntity = queryPlan.getOutputs().iterator().next();
-      if(AcidUtils.isAcidTable(writeEntity.getTable()) || AcidUtils.isInsertOnlyTable(writeEntity.getTable())) {
+      if(AcidUtils.isTransactionalTable(writeEntity.getTable())) {
         switch (writeEntity.getWriteType()) {
           case INSERT:
             //allow operation in a txn
             return true;
           case INSERT_OVERWRITE:
             //see HIVE-18154
-            return false;
+            return false; // TODO: is this still relevant for insert-only tables?
           default:
             //not relevant for LOAD
             return false;
@@ -426,7 +426,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           continue;
       }
       if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
+        compBuilder.setIsFullAcid(AcidUtils.isFullAcidTable(t));
       }
       LockComponent comp = compBuilder.build();
       LOG.debug("Adding lock component to lock request " + comp.toString());
@@ -490,7 +490,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           break;
         case INSERT:
           assert t != null;
-          if(AcidUtils.isAcidTable(t)) {
+          if(AcidUtils.isFullAcidTable(t)) {
             compBuilder.setShared();
           }
           else {
@@ -524,7 +524,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
               output.getWriteType().toString());
       }
       if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
+        compBuilder.setIsFullAcid(AcidUtils.isFullAcidTable(t));
       }
 
       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c8d1589..2152f00 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1802,7 +1802,7 @@ public class Hive {
     Path tblDataLocationPath =  tbl.getDataLocation();
     boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters());
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
-    boolean isFullAcidTable = AcidUtils.isAcidTable(tbl);
+    boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
     try {
       // Get the partition object if it already exists
       Partition oldPart = getPartition(tbl, partSpec, false);
@@ -2402,7 +2402,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     Table tbl = getTable(tableName);
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
     boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
-    boolean isFullAcidTable = AcidUtils.isAcidTable(tbl);
+    boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
     HiveConf sessionConf = SessionState.getSessionConf();
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList<Path>());

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 0fdff7d..e733b70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -410,7 +410,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
       if (stack.get(0) instanceof TableScanOperator) {
         TableScanOperator tso = ((TableScanOperator)stack.get(0));
         Table tab = tso.getConf().getTableMetadata();
-        if (AcidUtils.isAcidTable(tab)) {
+        if (AcidUtils.isFullAcidTable(tab)) {
          /*ACID tables have complex directory layout and require merging of delta files
           * on read thus we should not try to read bucket files directly*/
           return null;

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 69447d9..59c0fe4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -492,7 +492,7 @@ public final class GenMapRedUtils {
       HiveConf conf, boolean local) throws SemanticException {
     ArrayList<Path> partDir = new ArrayList<Path>();
     ArrayList<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
-    boolean isAcidTable = false;
+    boolean isFullAcidTable = false;
 
     Path tblDir = null;
     plan.setNameToSplitSample(parseCtx.getNameToSplitSample());
@@ -504,7 +504,7 @@ public final class GenMapRedUtils {
     if (partsList == null) {
       try {
         partsList = PartitionPruner.prune(tsOp, parseCtx, alias_id);
-        isAcidTable = tsOp.getConf().isAcidTable();
+        isFullAcidTable = tsOp.getConf().isFullAcidTable();
       } catch (SemanticException e) {
         throw e;
       }
@@ -541,8 +541,8 @@ public final class GenMapRedUtils {
     long sizeNeeded = Integer.MAX_VALUE;
     int fileLimit = -1;
     if (parseCtx.getGlobalLimitCtx().isEnable()) {
-      if (isAcidTable) {
-        LOG.info("Skip Global Limit optimization for ACID table");
+      if (isFullAcidTable) {
+        LOG.info("Skipping Global Limit optimization for an ACID table");
         parseCtx.getGlobalLimitCtx().disableOpt();
       } else {
         long sizePerRow = HiveConf.getLongVar(parseCtx.getConf(),

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 190771e..1186bd4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -1145,7 +1145,7 @@ public class Vectorizer implements PhysicalPlanResolver {
      *      This picks up Input File Format not supported by the other two.
      */
     private boolean verifyAndSetVectorPartDesc(
-        PartitionDesc pd, boolean isAcidTable,
+        PartitionDesc pd, boolean isFullAcidTable,
         Set<String> inputFileFormatClassNameSet,
         Map<VectorPartitionDesc, VectorPartitionDesc> vectorPartitionDescMap,
        Set<String> enabledConditionsMetSet, ArrayList<String> enabledConditionsNotMetList,
@@ -1159,7 +1159,7 @@ public class Vectorizer implements PhysicalPlanResolver {
 
      boolean isInputFileFormatVectorized = Utilities.isInputFileFormatVectorized(pd);
 
-      if (isAcidTable) {
+      if (isFullAcidTable) {
 
        // Today, ACID tables are only ORC and that format is vectorizable.  Verify these
         // assumptions.
@@ -1167,8 +1167,8 @@ public class Vectorizer implements PhysicalPlanResolver {
        Preconditions.checkState(inputFileFormatClassName.equals(OrcInputFormat.class.getName()));
 
         if (!useVectorizedInputFileFormat) {
-          enabledConditionsNotMetList.add(
-              "Vectorizing ACID tables requires " + HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
+          enabledConditionsNotMetList.add("Vectorizing ACID tables requires "
+        + HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
           return false;
         }
 
@@ -1377,7 +1377,7 @@ public class Vectorizer implements PhysicalPlanResolver {
        TableScanOperator tableScanOperator, VectorTaskColumnInfo vectorTaskColumnInfo)
             throws SemanticException {
 
-      boolean isAcidTable = tableScanOperator.getConf().isAcidTable();
+      boolean isFullAcidTable = tableScanOperator.getConf().isFullAcidTable();
 
       // These names/types are the data columns plus partition columns.
       final List<String> allColumnNameList = new ArrayList<String>();
@@ -1436,7 +1436,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         }
         Set<Support> newSupportSet = new TreeSet<Support>();
         if (!verifyAndSetVectorPartDesc(
-            partDesc, isAcidTable,
+            partDesc, isFullAcidTable,
             inputFileFormatClassNameSet,
             vectorPartitionDescMap,
             enabledConditionsMetSet, enabledConditionsNotMetList,
@@ -1594,7 +1594,8 @@ public class Vectorizer implements PhysicalPlanResolver {
       // vectorTaskColumnInfo.
       currentOperator = tableScanOperator;
      ImmutablePair<Boolean, Boolean> validateInputFormatAndSchemaEvolutionPair =
-          validateInputFormatAndSchemaEvolution(mapWork, alias, tableScanOperator, vectorTaskColumnInfo);
+          validateInputFormatAndSchemaEvolution(
+              mapWork, alias, tableScanOperator, vectorTaskColumnInfo);
       if (!validateInputFormatAndSchemaEvolutionPair.left) {
         // Have we already set the enabled conditions not met?
         if (!validateInputFormatAndSchemaEvolutionPair.right) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 834cb0c..4338fa6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -4390,7 +4390,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     if (enableFlag) {
       for (String tableName : materializedViewTable.getCreationMetadata().getTablesUsed()) {
         Table table = getTable(tableName, true);
-        if (!AcidUtils.isAcidTable(table)) {
+        if (!AcidUtils.isTransactionalTable(table)) {
          throw new SemanticException("Automatic rewriting for materialized view cannot "
              + "be enabled if the materialized view uses non-transactional tables");
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3e9c238..7ed9fe4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -2003,7 +2003,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
            "Inconsistent data structure detected: we are writing to " + ts.tableHandle  + " in " +
                name + " but it's not in isInsertIntoTable() or getInsertOverwriteTables()";
         // Disallow update and delete on non-acid tables
-        boolean isAcid = AcidUtils.isAcidTable(ts.tableHandle);
+        boolean isAcid = AcidUtils.isFullAcidTable(ts.tableHandle);
         if ((updating(name) || deleting(name)) && !isAcid) {
          // Whether we are using an acid compliant transaction manager has already been caught in
          // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid
@@ -6586,7 +6586,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
        nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
       }
      input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(),
-          maxReducers, (AcidUtils.isAcidTable(dest_tab) ?
+          maxReducers, (AcidUtils.isFullAcidTable(dest_tab) ?
              getAcidType(table_desc.getOutputFileFormatClass(), dest) : AcidUtils.Operation.NOT_ACID));
      reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
@@ -6796,7 +6796,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       dest_tab = qbm.getDestTableForAlias(dest);
       destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
-      destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab);
+      destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
       destTableIsTemporary = dest_tab.isTemporary();
 
       // Is the user trying to insert into a external tables
@@ -6909,7 +6909,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       dest_part = qbm.getDestPartitionForAlias(dest);
       dest_tab = dest_part.getTable();
       destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
-      destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab);
+      destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
 
       checkExternalTable(dest_tab);
 
@@ -7034,7 +7034,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
      }
 
      destTableIsTransactional = tblDesc != null && AcidUtils.isTransactionalTable(tblDesc);
-      destTableIsFullAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc);
+      destTableIsFullAcid = tblDesc != null && AcidUtils.isFullAcidTable(tblDesc);
 
       boolean isDestTempFile = true;
       if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) {
@@ -12896,7 +12896,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
            throw new SemanticException("View definition references materialized view " + alias);
          }
          if (createVwDesc.isMaterialized() && createVwDesc.isRewriteEnabled() &&
-              !AcidUtils.isAcidTable(table)) {
+              !AcidUtils.isTransactionalTable(table)) {
            throw new SemanticException("Automatic rewriting for materialized view cannot "
                + "be enabled if the materialized view uses non-transactional tables");
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index e1cea22..abb2e88 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -160,8 +160,10 @@ public class TableExport {
   }
 
   private boolean shouldExport() {
+    // Note: this is a temporary setting that is needed because replication does not support
+    //       ACID or MM tables at the moment. It will eventually be removed.
     if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_INCLUDE_ACID_TABLES)
-        && AcidUtils.isAcidTable(tableSpec.tableHandle)) {
+        && AcidUtils.isTransactionalTable(tableSpec.tableHandle)) {
       return true;
     }
     return Utils.shouldReplicate(replicationSpec, tableSpec.tableHandle, conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 661446d..50ceba5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -104,7 +104,7 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD
 
   private boolean isMetadataOnly = false;
 
-  private boolean isAcidTable;
+  private boolean isTransactionalTable;
 
   private boolean vectorized;
 
@@ -135,8 +135,8 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD
     this.alias = alias;
     this.virtualCols = vcs;
     this.tableMetadata = tblMetadata;
-    isAcidTable = AcidUtils.isAcidTable(this.tableMetadata);
-    if (isAcidTable) {
+    isTransactionalTable = AcidUtils.isTransactionalTable(this.tableMetadata);
+    if (isTransactionalTable) {
      acidOperationalProperties = AcidUtils.getAcidOperationalProperties(this.tableMetadata);
     }
   }
@@ -177,8 +177,10 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD
     StringBuilder sb = new StringBuilder();
     sb.append(this.tableMetadata.getCompleteName());
     sb.append("," + alias);
-    if (isAcidTable()) {
+    if (AcidUtils.isFullAcidTable(tableMetadata)) {
       sb.append(", ACID table");
+    } else if (isTransactionalTable()) {
+      sb.append(", transactional table");
     }
     sb.append(",Tbl:");
     sb.append(this.statistics.getBasicStatsState());
@@ -187,8 +189,8 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD
     return sb.toString();
   }
 
-  public boolean isAcidTable() {
-    return isAcidTable;
+  public boolean isTransactionalTable() {
+    return isTransactionalTable;
   }
 
   public AcidUtils.AcidOperationalProperties getAcidOperationalProperties() {
@@ -518,4 +520,8 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD
     }
     return false;
   }
+
+  public boolean isFullAcidTable() {
+    return isTransactionalTable() && !getAcidOperationalProperties().isInsertOnly();
+  }
 }
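
With TableScanDesc now carrying the broader transactional bit and deriving fullness on demand, operators can ask exactly the question they mean. A hedged routing sketch (hypothetical helper; the predicates are defined or derived in the hunks above):

    import org.apache.hadoop.hive.ql.plan.TableScanDesc;

    public class ScanRoutingSketch {
      static String route(TableScanDesc ts) {
        if (ts.isFullAcidTable()) {
          return "ACID reader (merge deltas on read)";
        } else if (ts.isTransactionalTable()) {
          return "insert-only (MM) scan";
        }
        return "plain scan";
      }
    }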

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
index 78f48b1..05b0474 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
@@ -48,7 +48,7 @@ public abstract class Partish {
   // rename
   @Deprecated
   public final boolean isAcid() {
-    return AcidUtils.isAcidTable(getTable());
+    return AcidUtils.isFullAcidTable(getTable());
   }
 
   public abstract Table getTable();

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 0e456df..236e585 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -146,7 +147,7 @@ public class CompactorMR {
     job.setBoolean("mapreduce.map.speculative", false);
 
     // Set appropriate Acid readers/writers based on the table properties.
-    AcidUtils.setAcidOperationalProperties(job,
+    AcidUtils.setAcidOperationalProperties(job, true,
             AcidUtils.getAcidOperationalProperties(t.getParameters()));
 
     return job;
@@ -356,6 +357,7 @@ public class CompactorMR {
    * to use.
    * @param job the job to update
    * @param cols the columns of the table
+   * @param map 
    */
   private void setColumnTypes(JobConf job, List<FieldSchema> cols) {
     StringBuilder colNames = new StringBuilder();
@@ -373,7 +375,6 @@ public class CompactorMR {
     }
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, colNames.toString());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, colTypes.toString());
-    HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
index 8945fdf..5632350 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
@@ -597,14 +597,14 @@ public class TestAcidUtils {
     AcidUtils.AcidOperationalProperties oprProps = AcidUtils.AcidOperationalProperties.getDefault();
     Configuration testConf = new Configuration();
     // Test setter for configuration object.
-    AcidUtils.setAcidOperationalProperties(testConf, oprProps);
+    AcidUtils.setAcidOperationalProperties(testConf, true, oprProps);
     assertEquals(1, testConf.getInt(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname, -1));
     // Test getter for configuration object.
     assertEquals(oprProps.toString(), AcidUtils.getAcidOperationalProperties(testConf).toString());
 
     Map<String, String> parameters = new HashMap<String, String>();
     // Test setter for map object.
-    AcidUtils.setAcidOperationalProperties(parameters, oprProps);
+    AcidUtils.setAcidOperationalProperties(parameters, true, oprProps);
     assertEquals(oprProps.toString(),
         parameters.get(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname));
     // Test getter for map object.

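The read-path tests in the files below pass null for the properties argument; a sketch of the assumed equivalence with the removed helper:

    // Assumed equivalence, based on the replacements below:
    // old: AcidUtils.setAcidTableScan(conf, true);
    // new: AcidUtils.setAcidOperationalProperties(conf, true, null);
    // i.e. a null properties object only sets the transactional-scan flag.
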
http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 92f005d..8a6a056 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -836,7 +836,7 @@ public class TestInputOutputFormat {
   public void testEtlCombinedStrategy() throws Exception {
     conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL");
     conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS.varname, "1000000");
-    AcidUtils.setAcidTableScan(conf, true);
+    AcidUtils.setAcidOperationalProperties(conf, true, null);
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
 
@@ -2285,7 +2285,7 @@ public class TestInputOutputFormat {
 
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
 
     org.apache.hadoop.mapred.RecordReader<NullWritable, VectorizedRowBatch>
         reader = inputFormat.getRecordReader(splits[0], conf, Reporter.NULL);
@@ -3377,7 +3377,7 @@ public class TestInputOutputFormat {
   public void testACIDReaderNoFooterSerialize() throws Exception {
     MockFileSystem fs = new MockFileSystem(conf);
     MockPath mockPath = new MockPath(fs, "mock:///mocktable5");
-    conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true");
+    conf.set(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, "true");
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
@@ -3458,7 +3458,7 @@ public class TestInputOutputFormat {
   public void testACIDReaderFooterSerialize() throws Exception {
     MockFileSystem fs = new MockFileSystem(conf);
     MockPath mockPath = new MockPath(fs, "mock:///mocktable6");
-    conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true");
+    conf.set(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, "true");
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
@@ -3569,7 +3569,8 @@ public class TestInputOutputFormat {
 
     //set up props for read
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
-    AcidUtils.setAcidTableScan(conf, true);
+    AcidUtils.setAcidOperationalProperties(conf, true, null);
+
 
     OrcInputFormat orcInputFormat = new OrcInputFormat();
     InputSplit[] splits = orcInputFormat.getSplits(conf, 2);
@@ -3648,7 +3649,7 @@ public class TestInputOutputFormat {
 
     //set up props for read
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
-    AcidUtils.setAcidTableScan(conf, true);
+    AcidUtils.setAcidOperationalProperties(conf, true, null);
 
     OrcInputFormat orcInputFormat = new OrcInputFormat();
     InputSplit[] splits = orcInputFormat.getSplits(conf, 2);

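Both forms used in these tests mark a configuration for a transactional read under the renamed flag (HIVE_ACID_TABLE_SCAN is now HIVE_TRANSACTIONAL_TABLE_SCAN); a sketch of the two entry points:

    // Direct flag, as in the reader tests above:
    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
    // Utility call, as in the split-generation tests above; a non-null
    // third argument would also record the operational properties:
    AcidUtils.setAcidOperationalProperties(conf, true, null);
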
http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index c6a866a..bbd040a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -384,7 +384,7 @@ public class TestOrcRawRecordMerger {
     Configuration conf = new Configuration();
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "col1");
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "string");
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
     Reader reader = Mockito.mock(Reader.class, settings);
     RecordReader recordReader = Mockito.mock(RecordReader.class, settings);
 
@@ -602,7 +602,7 @@ public class TestOrcRawRecordMerger {
         OrcFile.readerOptions(conf));
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
     OrcRawRecordMerger merger =
         new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
             createMaximalTxnList(), new Reader.Options(),
@@ -681,7 +681,7 @@ public class TestOrcRawRecordMerger {
 
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setAcidTableScan(conf,true);
+    AcidUtils.setAcidOperationalProperties(conf, true, null);
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
 
     //the first "split" is for base/
@@ -1149,7 +1149,8 @@ public class TestOrcRawRecordMerger {
     JobConf job = new JobConf();
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
+
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set("mapred.min.split.size", "1");
     job.set("mapred.max.split.size", "2");
@@ -1284,7 +1285,8 @@ public class TestOrcRawRecordMerger {
     job.set("mapred.input.dir", root.toString());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
+
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     InputSplit[] splits = inf.getSplits(job, 5);
     //base has 10 rows, so 5 splits, 1 delta has 2 rows so 1 split, and 1 delta has 3 so 2 splits
@@ -1381,7 +1383,8 @@ public class TestOrcRawRecordMerger {
     job.set("bucket_count", "1");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
+
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     InputSplit[] splits = inf.getSplits(job, 5);
     assertEquals(2, splits.length);
@@ -1455,7 +1458,8 @@ public class TestOrcRawRecordMerger {
     job.set("bucket_count", "2");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setAcidTableScan(job,true);
+    AcidUtils.setAcidOperationalProperties(job, true, null);
+
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
 
     // read the keys before the delta is flushed

http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 65508f4..68cde2d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -92,7 +92,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
     conf = new JobConf();
     conf.set("bucket_count", "1");
     conf.set(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
-    conf.setBoolean(HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN.varname, true);
+    conf.setBoolean(HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, true);
     conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
     conf.setInt(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname,
         AcidUtils.AcidOperationalProperties.getDefault().toInt());

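This test also sets the encoded operational properties directly; a sketch of the assumed int round-trip (parseInt is taken to be the decoding counterpart of toInt in AcidUtils.AcidOperationalProperties):

    int encoded = AcidUtils.AcidOperationalProperties.getDefault().toInt();
    conf.setInt(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname, encoded);
    // Assumed decoding path on the reader side:
    AcidUtils.AcidOperationalProperties decoded =
        AcidUtils.AcidOperationalProperties.parseInt(encoded);
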
http://git-wip-us.apache.org/repos/asf/hive/blob/ab33a7b7/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
index de6c718..6e45187 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -77,7 +77,7 @@ public class LockComponentBuilder {
     return this;
   }
 
-  public LockComponentBuilder setIsAcid(boolean t) {
+  public LockComponentBuilder setIsFullAcid(boolean t) {
     component.setIsAcid(t);
     return this;
   }

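The rename is cosmetic at the thrift level: the builder still sets the same isAcid field on the LockComponent. A hedged usage sketch (the other builder calls shown are assumed unchanged by this patch; required setters such as the lock type are illustrative):

    LockComponent lc = new LockComponentBuilder()
        .setDbName("db")
        .setTableName("tbl")
        .setShared()
        .setIsFullAcid(true)   // was setIsAcid(true)
        .build();
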