HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs 
(Ashutosh Chauhan via Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5562fae7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5562fae7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5562fae7

Branch: refs/heads/master
Commit: 5562fae73e417c81a193c1e6deb6388d3fef746b
Parents: 1b6600d
Author: Ashutosh Chauhan <hashut...@apache.org>
Authored: Tue Nov 24 17:08:35 2015 -0800
Committer: Ashutosh Chauhan <hashut...@apache.org>
Committed: Tue Nov 24 17:08:35 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   8 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   1 +
 .../apache/hadoop/hive/ql/exec/Utilities.java   |   3 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |  86 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  54 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |  14 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   6 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   5 +-
 .../queries/clientnegative/acid_overwrite.q     |   2 +-
 .../queries/clientnegative/archive_corrupt.q    |   2 +-
 .../authorization_delete_nodeletepriv.q         |   2 +-
 .../authorization_update_noupdatepriv.q         |   2 +-
 .../clientnegative/delete_non_acid_table.q      |   2 +-
 .../clientnegative/delete_not_bucketed.q        |   2 +-
 .../test/queries/clientnegative/delete_sorted.q |   2 +-
 .../test/queries/clientnegative/insert_sorted.q |   2 +-
 .../clientnegative/insert_values_sorted.q       |   2 +-
 .../queries/clientnegative/merge_negative_3.q   |   4 +-
 .../queries/clientnegative/smb_bucketmapjoin.q  |   4 +-
 .../queries/clientnegative/smb_mapjoin_14.q     |   4 +-
 .../sortmerge_mapjoin_mismatch_1.q              |   4 +-
 .../queries/clientnegative/update_bucket_col.q  |   2 +-
 .../clientnegative/update_no_such_table.q       |   2 +-
 .../clientnegative/update_non_acid_table.q      |   2 +-
 .../clientnegative/update_not_bucketed.q        |   2 +-
 .../clientnegative/update_partition_col.q       |   2 +-
 .../test/queries/clientnegative/update_sorted.q |   2 +-
 ql/src/test/queries/clientpositive/acid_join.q  |   2 +-
 .../queries/clientpositive/acid_vectorization.q |   2 +-
 .../acid_vectorization_partition.q              |   2 +-
 .../clientpositive/acid_vectorization_project.q |   2 +-
 .../alter_numbuckets_partitioned_table.q        |  59 --
 .../alter_numbuckets_partitioned_table2.q       |  85 --
 .../alter_numbuckets_partitioned_table2_h23.q   |   5 +-
 .../alter_numbuckets_partitioned_table_h23.q    |   2 +-
 .../clientpositive/archive_excludeHadoop20.q    |   2 +-
 .../test/queries/clientpositive/archive_multi.q |   2 +-
 .../clientpositive/authorization_delete.q       |   2 +-
 .../authorization_delete_own_table.q            |   2 +-
 .../clientpositive/authorization_update.q       |   2 +-
 .../authorization_update_own_table.q            |   2 +-
 .../clientpositive/auto_smb_mapjoin_14.q        |   4 +-
 .../clientpositive/auto_sortmerge_join_10.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_13.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_14.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_15.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_16.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_6.q      |   4 +-
 .../clientpositive/auto_sortmerge_join_9.q      |   4 +-
 ql/src/test/queries/clientpositive/bucket1.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket2.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket3.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket4.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket5.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket6.q    |   4 +-
 .../test/queries/clientpositive/bucket_many.q   |   2 +-
 .../queries/clientpositive/bucket_map_join_1.q  |   4 +-
 .../queries/clientpositive/bucket_map_join_2.q  |   4 +-
 .../clientpositive/bucket_map_join_spark4.q     |   8 +-
 .../clientpositive/bucket_map_join_tez1.q       |   4 +-
 .../clientpositive/bucket_map_join_tez2.q       |   4 +-
 .../clientpositive/bucket_num_reducers.q        |   2 +-
 .../clientpositive/bucket_num_reducers2.q       |   2 +-
 .../queries/clientpositive/bucketmapjoin13.q    |   4 +-
 .../queries/clientpositive/bucketmapjoin6.q     |   4 +-
 .../bucketsortoptimize_insert_1.q               |   4 +-
 .../bucketsortoptimize_insert_2.q               |   4 +-
 .../bucketsortoptimize_insert_3.q               |   4 +-
 .../bucketsortoptimize_insert_4.q               |   4 +-
 .../bucketsortoptimize_insert_5.q               |   4 +-
 .../bucketsortoptimize_insert_6.q               |   4 +-
 .../bucketsortoptimize_insert_7.q               |   4 +-
 .../bucketsortoptimize_insert_8.q               |   4 +-
 .../queries/clientpositive/cbo_rp_auto_join1.q  |   4 +-
 ql/src/test/queries/clientpositive/combine3.q   |   2 +-
 .../clientpositive/delete_all_non_partitioned.q |   2 +-
 .../clientpositive/delete_all_partitioned.q     |   2 +-
 .../queries/clientpositive/delete_orig_table.q  |   2 +-
 .../queries/clientpositive/delete_tmp_table.q   |   2 +-
 .../clientpositive/delete_where_no_match.q      |   2 +-
 .../delete_where_non_partitioned.q              |   2 +-
 .../clientpositive/delete_where_partitioned.q   |   2 +-
 .../clientpositive/delete_whole_partition.q     |   2 +-
 .../disable_merge_for_bucketing.q               |   2 +-
 .../clientpositive/dynpart_sort_opt_bucketing.q |   8 +-
 .../dynpart_sort_opt_vectorization.q            |   8 +-
 .../clientpositive/dynpart_sort_optimization.q  |   8 +-
 .../clientpositive/dynpart_sort_optimization2.q |   4 +-
 .../dynpart_sort_optimization_acid.q            |   2 +-
 .../encryption_insert_partition_dynamic.q       |   2 +-
 .../encryption_insert_partition_static.q        |   2 +-
 .../test/queries/clientpositive/enforce_order.q |   2 +-
 .../test/queries/clientpositive/explainuser_1.q |   6 +-
 .../test/queries/clientpositive/explainuser_2.q |   4 +-
 .../test/queries/clientpositive/explainuser_3.q |   6 +-
 .../queries/clientpositive/groupby_sort_1.q     |   4 +-
 .../queries/clientpositive/groupby_sort_10.q    |   4 +-
 .../queries/clientpositive/groupby_sort_11.q    |   4 +-
 .../queries/clientpositive/groupby_sort_1_23.q  |   4 +-
 .../queries/clientpositive/groupby_sort_2.q     |   4 +-
 .../queries/clientpositive/groupby_sort_3.q     |   4 +-
 .../queries/clientpositive/groupby_sort_4.q     |   4 +-
 .../queries/clientpositive/groupby_sort_5.q     |   4 +-
 .../queries/clientpositive/groupby_sort_6.q     |   4 +-
 .../queries/clientpositive/groupby_sort_7.q     |   4 +-
 .../queries/clientpositive/groupby_sort_8.q     |   4 +-
 .../queries/clientpositive/groupby_sort_9.q     |   4 +-
 .../clientpositive/groupby_sort_skew_1.q        |   4 +-
 .../clientpositive/groupby_sort_skew_1_23.q     |   4 +-
 .../clientpositive/groupby_sort_test_1.q        |   4 +-
 .../infer_bucket_sort_bucketed_table.q          |   4 +-
 .../infer_bucket_sort_map_operators.q           |   4 +-
 .../insert_acid_dynamic_partition.q             |   2 +-
 .../clientpositive/insert_acid_not_bucketed.q   |   2 +-
 .../clientpositive/insert_into_with_schema2.q   |   2 +-
 .../clientpositive/insert_nonacid_from_acid.q   |   2 +-
 .../queries/clientpositive/insert_orig_table.q  |   2 +-
 .../clientpositive/insert_update_delete.q       |   2 +-
 .../insert_values_acid_not_bucketed.q           |   2 +-
 .../insert_values_dynamic_partitioned.q         |   2 +-
 .../insert_values_non_partitioned.q             |   2 +-
 .../clientpositive/insert_values_orig_table.q   |   2 +-
 .../clientpositive/insert_values_partitioned.q  |   2 +-
 .../clientpositive/insert_values_tmp_table.q    |   2 +-
 .../clientpositive/insertoverwrite_bucket.q     |   4 +-
 .../test/queries/clientpositive/join_nullsafe.q |   4 +-
 .../queries/clientpositive/load_dyn_part2.q     |   2 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |   4 +-
 .../queries/clientpositive/orc_empty_files.q    |   2 +-
 .../partition_wise_fileformat14.q               |   4 +-
 .../test/queries/clientpositive/quotedid_smb.q  |   4 +-
 .../queries/clientpositive/reduce_deduplicate.q |   2 +-
 ql/src/test/queries/clientpositive/sample10.q   |   2 +-
 .../test/queries/clientpositive/smb_mapjoin9.q  |   4 +-
 .../queries/clientpositive/smb_mapjoin_11.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_12.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_13.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_14.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_15.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_16.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_17.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_18.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_19.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_20.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_21.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_22.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_25.q     |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_6.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_7.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_8.q |   4 +-
 .../clientpositive/sort_merge_join_desc_1.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_2.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_3.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_4.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_5.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_6.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_7.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_8.q     |   2 +-
 ql/src/test/queries/clientpositive/stats10.q    |   2 +-
 .../clientpositive/tez_bmj_schema_evolution.q   |   4 +-
 ql/src/test/queries/clientpositive/tez_fsstat.q |   4 +-
 ql/src/test/queries/clientpositive/tez_smb_1.q  |   4 +-
 .../test/queries/clientpositive/tez_smb_empty.q |   4 +-
 .../test/queries/clientpositive/tez_smb_main.q  |   4 +-
 .../queries/clientpositive/transform_acid.q     |   2 +-
 .../clientpositive/truncate_column_buckets.q    |   2 +-
 .../update_after_multiple_inserts.q             |   2 +-
 .../clientpositive/update_all_non_partitioned.q |   2 +-
 .../clientpositive/update_all_partitioned.q     |   2 +-
 .../queries/clientpositive/update_all_types.q   |   2 +-
 .../queries/clientpositive/update_orig_table.q  |   2 +-
 .../queries/clientpositive/update_tmp_table.q   |   2 +-
 .../queries/clientpositive/update_two_cols.q    |   2 +-
 .../clientpositive/update_where_no_match.q      |   2 +-
 .../update_where_non_partitioned.q              |   2 +-
 .../clientpositive/update_where_partitioned.q   |   2 +-
 .../clientpositive/vector_auto_smb_mapjoin_14.q |   4 +-
 .../test/queries/clientpositive/vector_bucket.q |   2 +-
 .../alter_numbuckets_partitioned_table.q.out    | 553 ------------
 .../alter_numbuckets_partitioned_table2.q.out   | 851 -------------------
 ...lter_numbuckets_partitioned_table2_h23.q.out |   6 +-
 ql/src/test/results/clientpositive/cp_sel.q.out |  81 +-
 .../clientpositive/index_auto_update.q.out      |   2 +-
 .../insert_into_with_schema2.q.out              |  12 +-
 .../results/clientpositive/orc_analyze.q.out    |  48 +-
 .../results/clientpositive/smb_mapjoin_11.q.out | 217 ++---
 .../clientpositive/spark/smb_mapjoin_11.q.out   |  74 +-
 .../clientpositive/spark/smb_mapjoin_12.q.out   | 154 ++--
 .../results/clientpositive/spark/stats9.q.out   |   2 +-
 ql/src/test/results/clientpositive/stats9.q.out |   2 +-
 .../clientpositive/tez/orc_analyze.q.out        |  48 +-
 191 files changed, 690 insertions(+), 2220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fffedd9..2bd850d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1198,12 +1198,8 @@ public class HiveConf extends Configuration {
         "The log level to use for tasks executing as part of the DAG.\n" +
         "Used only if hive.tez.java.opts is used to configure Java options."),
 
-    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
-        "Whether bucketing is enforced. If true, while inserting into the 
table, bucketing is enforced."),
-    HIVEENFORCESORTING("hive.enforce.sorting", false,
-        "Whether sorting is enforced. If true, while inserting into the table, 
sorting is enforced."),
     HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
-        "If hive.enforce.bucketing or hive.enforce.sorting is true, don't 
create a reducer for enforcing \n" +
+        "Don't create a reducer for enforcing \n" +
         "bucketing/sorting for queries of the form: \n" +
         "insert overwrite table T2 select * from T1;\n" +
         "where T1 and T2 are bucketed/sorted by the same keys into the same 
number of buckets."),
@@ -3082,9 +3078,7 @@ public class HiveConf extends Configuration {
     ConfVars.DROPIGNORESNONEXISTENT.varname,
     ConfVars.HIVECOUNTERGROUP.varname,
     ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
-    ConfVars.HIVEENFORCEBUCKETING.varname,
     ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
-    ConfVars.HIVEENFORCESORTING.varname,
     ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
     ConfVars.HIVEEXPREVALUATIONCACHE.varname,
     ConfVars.HIVEHASHTABLELOADFACTOR.varname,

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 892587a..8a47605 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -423,6 +423,7 @@ public enum ErrorMsg {
   IMPORT_INTO_STRICT_REPL_TABLE(10303,"Non-repl import disallowed against 
table that is a destination of replication."),
   CTAS_LOCATION_NONEMPTY(10304, "CREATE-TABLE-AS-SELECT cannot create table 
with location to a non-empty directory."),
   CTAS_CREATES_VOID_TYPE(10305, "CREATE-TABLE-AS-SELECT creates a VOID type, 
please use CAST to specify the type, near field: "),
+  TBL_SORTED_NOT_BUCKETED(10306, "Destination table {0} found to be sorted but 
not bucketed.", true),
   //========================== 20000 range starts here 
========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your 
custom script. "

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8b8cf6d..4eb46ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2159,8 +2159,7 @@ public final class Utilities {
       FileStatus[] items = fs.listStatus(path);
       taskIDToFile = removeTempOrDuplicateFiles(items, fs);
       if(taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && 
conf.getTable() != null
-          && (conf.getTable().getNumBuckets() > taskIDToFile.size())
-          && (HiveConf.getBoolVar(hconf, 
HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+          && (conf.getTable().getNumBuckets() > taskIDToFile.size())) {
           // get the missing buckets and generate empty buckets for 
non-dynamic partition
         String taskID1 = taskIDToFile.keySet().iterator().next();
         Path bucketPath = taskIDToFile.values().iterator().next().getPath();

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index e2a0eae..c3553a5 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -29,8 +29,6 @@ import java.util.Stack;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -76,8 +74,7 @@ import com.google.common.collect.Maps;
  * When dynamic partitioning (with or without bucketing and sorting) is 
enabled, this optimization
  * sorts the records on partition, bucket and sort columns respectively before 
inserting records
  * into the destination table. This enables reducers to keep only one record 
writer all the time
- * thereby reducing the the memory pressure on the reducers. This optimization 
will force a reducer
- * even when hive.enforce.bucketing and hive.enforce.sorting is set to false.
+ * thereby reducing the memory pressure on the reducers.
  */
 public class SortedDynPartitionOptimizer implements Transform {
 
@@ -270,58 +267,53 @@ public class SortedDynPartitionOptimizer implements 
Transform {
     // Remove RS and SEL introduced by enforce bucketing/sorting config
     // Convert PARENT -> RS -> SEL -> FS to PARENT -> FS
     private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) {
-      HiveConf hconf = parseCtx.getConf();
-      boolean enforceBucketing = HiveConf.getBoolVar(hconf, 
ConfVars.HIVEENFORCEBUCKETING);
-      boolean enforceSorting = HiveConf.getBoolVar(hconf, 
ConfVars.HIVEENFORCESORTING);
-      if (enforceBucketing || enforceSorting) {
-        Set<ReduceSinkOperator> reduceSinks = 
OperatorUtils.findOperatorsUpstream(fsOp,
-            ReduceSinkOperator.class);
-        Operator<? extends OperatorDesc> rsToRemove = null;
-        List<ReduceSinkOperator> rsOps = parseCtx
-            .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
-        boolean found = false;
-
-        // iterate through all RS and locate the one introduce by enforce 
bucketing
-        for (ReduceSinkOperator reduceSink : reduceSinks) {
-          for (ReduceSinkOperator rsOp : rsOps) {
-            if (reduceSink.equals(rsOp)) {
-              rsToRemove = reduceSink;
-              found = true;
-              break;
-            }
-          }
 
-          if (found) {
+      Set<ReduceSinkOperator> reduceSinks = 
OperatorUtils.findOperatorsUpstream(fsOp,
+          ReduceSinkOperator.class);
+      Operator<? extends OperatorDesc> rsToRemove = null;
+      List<ReduceSinkOperator> rsOps = parseCtx
+          .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
+      boolean found = false;
+
+      // iterate through all RS and locate the one introduced by enforce 
bucketing
+      for (ReduceSinkOperator reduceSink : reduceSinks) {
+        for (ReduceSinkOperator rsOp : rsOps) {
+          if (reduceSink.equals(rsOp)) {
+            rsToRemove = reduceSink;
+            found = true;
             break;
           }
         }
 
-        // iF RS is found remove it and its child (EX) and connect its parent
-        // and grand child
         if (found) {
-          Operator<? extends OperatorDesc> rsParent = 
rsToRemove.getParentOperators().get(0);
-          Operator<? extends OperatorDesc> rsChild = 
rsToRemove.getChildOperators().get(0);
-          Operator<? extends OperatorDesc> rsGrandChild = 
rsChild.getChildOperators().get(0);
-
-          if (rsChild instanceof SelectOperator) {
-            // if schema size cannot be matched, then it could be because of 
constant folding
-            // converting partition column expression to constant expression. 
The constant
-            // expression will then get pruned by column pruner since it will 
not reference to
-            // any columns.
-            if (rsParent.getSchema().getSignature().size() !=
-                rsChild.getSchema().getSignature().size()) {
-              return false;
-            }
-            rsParent.getChildOperators().clear();
-            rsParent.getChildOperators().add(rsGrandChild);
-            rsGrandChild.getParentOperators().clear();
-            rsGrandChild.getParentOperators().add(rsParent);
-            LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + 
rsChild.getOperatorId()
-                + " as it was introduced by enforce bucketing/sorting.");
-          }
+          break;
         }
       }
 
+      // If RS is found remove it and its child (EX) and connect its parent
+      // and grand child
+      if (found) {
+        Operator<? extends OperatorDesc> rsParent = 
rsToRemove.getParentOperators().get(0);
+        Operator<? extends OperatorDesc> rsChild = 
rsToRemove.getChildOperators().get(0);
+        Operator<? extends OperatorDesc> rsGrandChild = 
rsChild.getChildOperators().get(0);
+
+        if (rsChild instanceof SelectOperator) {
+          // if schema size cannot be matched, then it could be because of 
constant folding
+          // converting partition column expression to constant expression. 
The constant
+          // expression will then get pruned by column pruner since it will 
not reference to
+          // any columns.
+          if (rsParent.getSchema().getSignature().size() !=
+              rsChild.getSchema().getSignature().size()) {
+            return false;
+          }
+          rsParent.getChildOperators().clear();
+          rsParent.getChildOperators().add(rsGrandChild);
+          rsGrandChild.getParentOperators().clear();
+          rsGrandChild.getParentOperators().add(rsParent);
+          LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + 
rsChild.getOperatorId()
+              + " as it was introduced by enforce bucketing/sorting.");
+        }
+      }
       return true;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 69bb9d7..1b7873d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6054,7 +6054,6 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     // spray the data into multiple buckets. That way, we can support a very 
large
     // number of buckets without needing a very large number of reducers.
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnCols = new ArrayList<ExprNodeDesc>();
     ArrayList<ExprNodeDesc> sortCols = new ArrayList<ExprNodeDesc>();
     ArrayList<Integer> sortOrders = new ArrayList<Integer>();
@@ -6062,8 +6061,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     int numFiles = 1;
     int totalFiles = 1;
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if (dest_tab.getNumBuckets() > 0) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
@@ -6073,24 +6071,27 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
+        (dest_tab.getSortCols().size() > 0)) {
       sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
-      if (!enforceBucketing) {
-        partnCols = sortCols;
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new 
SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
+      } else {
+        if (!enforceBucketing) {
+          partnCols = sortCols;
+        }
       }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
       if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) {
         maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
       }
       int numBuckets = dest_tab.getNumBuckets();
       if (numBuckets > maxReducers) {
-        LOG.debug("XXXXXX numBuckets is " + numBuckets + " and maxReducers is 
" + maxReducers);
+        LOG.debug("numBuckets is {} and maxReducers is {}", numBuckets, 
maxReducers);
         multiFileSpray = true;
         totalFiles = numBuckets;
         if (totalFiles % maxReducers == 0) {
@@ -6123,11 +6124,9 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
   private void genPartnCols(String dest, Operator input, QB qb,
       TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws 
SemanticException {
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if ((dest_tab.getNumBuckets() > 0)) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnColsNoConvert = 
getPartitionColsFromBucketColsForUpdateDelete(input, false);
@@ -6138,15 +6137,19 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
-      if (!enforceBucketing) {
-        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, 
input, false);
+        (dest_tab.getSortCols().size() > 0)) {
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new 
SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
       }
+      else {
+        if(!enforceBucketing) {
+          partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, 
input, false);
+        }
+      }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       ctx.setPartnCols(partnColsNoConvert);
     }
   }
@@ -6234,8 +6237,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         if (dpCtx.getSPPath() != null) {
           dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath());
         }
-        if ((dest_tab.getNumBuckets() > 0) &&
-            (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+        if ((dest_tab.getNumBuckets() > 0)) {
           dpCtx.setNumBuckets(dest_tab.getNumBuckets());
         }
       }
@@ -6542,12 +6544,10 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     RowSchema fsRS = new RowSchema(vecCol);
 
     // The output files of a FileSink can be merged if they are either not 
being written to a table
-    // or are being written to a table which is either not bucketed or enforce 
bucketing is not set
-    // and table the table is either not sorted or enforce sorting is not set
-    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0 
&&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING)) ||
-        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0 &&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))));
+    // or are being written to a table which is not bucketed
+    // and the table is not sorted
+    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 
0) ||
+        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 
0)));
 
     // If this table is working with ACID semantics, turn off merging
     canBeMerged &= !destTableIsAcid;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index e13e6eb..db8b7d6 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -28,9 +28,9 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 /**
- * The LockManager is not ready, but for no-concurrency straight-line path we 
can 
+ * The LockManager is not ready, but for no-concurrency straight-line path we 
can
  * test AC=true, and AC=false with commit/rollback/exception and test 
resulting data.
- * 
+ *
  * Can also test, calling commit in AC=true mode, etc, toggling AC...
  */
 public class TestTxnCommands {
@@ -50,7 +50,7 @@ public class TestTxnCommands {
     ACIDTBL2("acidTbl2"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDORCTBL2("nonAcidOrcTbl2");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -70,7 +70,6 @@ public class TestTxnCommands {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, 
TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -107,7 +106,7 @@ public class TestTxnCommands {
       FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
     }
   }
-  @Test 
+  @Test
   public void testInsertOverwrite() throws Exception {
     runStatementOnDriver("insert overwrite table " + Table.NONACIDORCTBL + " 
select a,b from " + Table.NONACIDORCTBL2);
     runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "3(a int, b 
int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc 
TBLPROPERTIES ('transactional'='false')");
@@ -211,7 +210,7 @@ public class TestTxnCommands {
     rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a,b");
     Assert.assertEquals("Can't see my own write", 1, rs0.size());
   }
-  @Test 
+  @Test
   public void testReadMyOwnInsert() throws Exception {
     runStatementOnDriver("set autocommit false");
     runStatementOnDriver("START TRANSACTION");
@@ -431,6 +430,7 @@ public class TestTxnCommands {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -462,7 +462,7 @@ public class TestTxnCommands {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 5aa2500..8616eb0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -62,7 +62,7 @@ public class TestTxnCommands2 {
     ACIDTBLPART("acidTblPart"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDPART("nonAcidPart");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -82,7 +82,6 @@ public class TestTxnCommands2 {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, 
TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -330,6 +329,7 @@ public class TestTxnCommands2 {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -361,7 +361,7 @@ public class TestTxnCommands2 {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 44ad8b0..c6a7fcb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -51,7 +51,6 @@ public class TestDbTxnManager2 {
   public static void setUpClass() throws Exception {
     TxnDbUtil.setConfValues(conf);
     conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    conf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
   }
   @Before
   public void setUp() throws Exception {
@@ -211,8 +210,8 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("Unexpected number of locks found", 0, locks.size());
     checkCmdOnDriver(cpr);
   }
-  
-  
+
+
   private void checkLock(LockType type, LockState state, String db, String 
table, String partition, ShowLocksResponseElement l) {
     Assert.assertEquals(l.toString(),l.getType(), type);
     Assert.assertEquals(l.toString(),l.getState(), state);

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/acid_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/acid_overwrite.q 
b/ql/src/test/queries/clientnegative/acid_overwrite.q
index 2e57a3c..9ccf31e 100644
--- a/ql/src/test/queries/clientnegative/acid_overwrite.q
+++ b/ql/src/test/queries/clientnegative/acid_overwrite.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/archive_corrupt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_corrupt.q 
b/ql/src/test/queries/clientnegative/archive_corrupt.q
index 130b37b..ed49688 100644
--- a/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -1,7 +1,7 @@
 USE default;
 
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+
 
 drop table tstsrcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q 
b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
index f2de306..28c256e 100644
--- a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q 
b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
index c00c0eb..674ad1e 100644
--- a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_non_acid_table.q 
b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
index 6ae82ff..ec3d803 100644
--- a/ql/src/test/queries/clientnegative/delete_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table2(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_not_bucketed.q 
b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
index 80dffea..d575a8f 100644
--- a/ql/src/test/queries/clientnegative/delete_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc 
TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_sorted.q 
b/ql/src/test/queries/clientnegative/delete_sorted.q
index fd8d579..9f82c1f 100644
--- a/ql/src/test/queries/clientnegative/delete_sorted.q
+++ b/ql/src/test/queries/clientnegative/delete_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) 
clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/insert_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_sorted.q 
b/ql/src/test/queries/clientnegative/insert_sorted.q
index 18c942a..cd1a69c 100644
--- a/ql/src/test/queries/clientnegative/insert_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by 
(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/insert_values_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_values_sorted.q 
b/ql/src/test/queries/clientnegative/insert_values_sorted.q
index 260e2fb..ee26402 100644
--- a/ql/src/test/queries/clientnegative/insert_values_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_values_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by 
(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/merge_negative_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/merge_negative_3.q 
b/ql/src/test/queries/clientnegative/merge_negative_3.q
index 6bc645e..f5eb231 100644
--- a/ql/src/test/queries/clientnegative/merge_negative_3.q
+++ b/ql/src/test/queries/clientnegative/merge_negative_3.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 create table srcpart2 (key int, value string) partitioned by (ds string) 
clustered by (key) sorted by (key) into 2 buckets stored as RCFILE;
 insert overwrite table srcpart2 partition (ds='2011') select * from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q 
b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
index 880323c..c252d86 100644
--- a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
+++ b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q 
b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
index 54bfba0..4c93542 100644
--- a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q 
b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
index 7d11f45..8fbbd96 100644
--- a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
+++ b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
@@ -3,8 +3,8 @@ INTO 1 BUCKETS STORED AS RCFILE;
 create table table_desc(key int, value string) CLUSTERED BY (key) SORTED BY 
(key desc) 
 INTO 1 BUCKETS STORED AS RCFILE;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 
 insert overwrite table table_asc select key, value from src; 
 insert overwrite table table_desc select key, value from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_bucket_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_bucket_col.q 
b/ql/src/test/queries/clientnegative/update_bucket_col.q
index 515e024..c471a4c 100644
--- a/ql/src/test/queries/clientnegative/update_bucket_col.q
+++ b/ql/src/test/queries/clientnegative/update_bucket_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered 
by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_no_such_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_no_such_table.q 
b/ql/src/test/queries/clientnegative/update_no_such_table.q
index 07239cf..dffbab4 100644
--- a/ql/src/test/queries/clientnegative/update_no_such_table.q
+++ b/ql/src/test/queries/clientnegative/update_no_such_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 update no_such_table set b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_non_acid_table.q 
b/ql/src/test/queries/clientnegative/update_non_acid_table.q
index dd0b01e..da46141 100644
--- a/ql/src/test/queries/clientnegative/update_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/update_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_not_bucketed.q 
b/ql/src/test/queries/clientnegative/update_not_bucketed.q
index 8512fa7..d7d0da4 100644
--- a/ql/src/test/queries/clientnegative/update_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/update_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds 
string) stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_partition_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_partition_col.q 
b/ql/src/test/queries/clientnegative/update_partition_col.q
index e9c60cc..78d381e 100644
--- a/ql/src/test/queries/clientnegative/update_partition_col.q
+++ b/ql/src/test/queries/clientnegative/update_partition_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered 
by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_sorted.q 
b/ql/src/test/queries/clientnegative/update_sorted.q
index 917c3b5..f9e5db5 100644
--- a/ql/src/test/queries/clientnegative/update_sorted.q
+++ b/ql/src/test/queries/clientnegative/update_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by 
(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_join.q 
b/ql/src/test/queries/clientpositive/acid_join.q
index 2e6aeae..dca4d7d 100644
--- a/ql/src/test/queries/clientpositive/acid_join.q
+++ b/ql/src/test/queries/clientpositive/acid_join.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- This test checks that a join with tables with two different buckets send 
the right bucket info to each table.
 create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 
buckets stored as orc TBLPROPERTIES ("transactional"="true"); 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization.q 
b/ql/src/test/queries/clientpositive/acid_vectorization.q
index 4b11412..514d3fa 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q 
b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
index 00449bb..8dd1e09 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized_part(a INT, b STRING) partitioned by (ds string) 
CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES 
('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_project.q 
b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
index a44b57a..2a5f59a 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_project.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 
BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
deleted file mode 100644
index 627fcc1..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
+++ /dev/null
@@ -1,59 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-create table tst1(key string, value string) partitioned by (ds string) 
clustered by (key) into 10 buckets;
-
-alter table tst1 clustered by (key) into 8 buckets;
-
-describe formatted tst1;
-
-set hive.enforce.bucketing=true;
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets;
-
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
-describe formatted tst1;
-
--- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
-alter table tst1 partition (ds = '1') into 6 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing test order
-
-alter table tst1 clustered by (value) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing buckets
-
-alter table tst1 not clustered;
-
-describe formatted tst1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
deleted file mode 100644
index 2f26de8..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
+++ /dev/null
@@ -1,85 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the 
bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
-
-DESCRIBE FORMATTED tst1;
-
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
index 2c2e184..15a88bb 100644
--- 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
+++ 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -1,4 +1,3 @@
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- Tests that when overwriting a partition in a table after altering the 
bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
@@ -6,8 +5,8 @@ CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds 
STRING);
 
 DESCRIBE FORMATTED tst1;
 
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
+
+
 INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
 
 DESCRIBE FORMATTED tst1 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 439f351..7d523d9 100644
--- 
a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ 
b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -5,7 +5,7 @@ alter table tst1 clustered by (key) into 8 buckets;
 
 describe formatted tst1;
 
-set hive.enforce.bucketing=true;
+
 insert overwrite table tst1 partition (ds='1') select key, value from src;
 
 describe formatted tst1 partition (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q 
b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
index 316276a..b046f97 100644
--- a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
+++ b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+
 set hive.exec.submitviachild=true;
 set hive.exec.submit.local.task.via.child=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/archive_multi.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_multi.q 
b/ql/src/test/queries/clientpositive/archive_multi.q
index 1004aca..0259a3e 100644
--- a/ql/src/test/queries/clientpositive/archive_multi.q
+++ b/ql/src/test/queries/clientpositive/archive_multi.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+
 
 create database ac_test;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_delete.q 
b/ql/src/test/queries/clientpositive/authorization_delete.q
index d96e6ab..fe1a9ac 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete.q
@@ -4,7 +4,7 @@ set 
hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in 
parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q 
b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
index 7abdc12..34dfa6a 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_update.q 
b/ql/src/test/queries/clientpositive/authorization_update.q
index da1054e..5e57904 100644
--- a/ql/src/test/queries/clientpositive/authorization_update.q
+++ b/ql/src/test/queries/clientpositive/authorization_update.q
@@ -4,7 +4,7 @@ set 
hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in 
parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_update_own_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/authorization_update_own_table.q 
b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
index ace1ce2..e3292d2 100644
--- a/ql/src/test/queries/clientpositive/authorization_update_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q 
b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
index 699777e..4dca15b 100644
--- a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
index c07dd23..77b2282 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
index f35fec1..1c868dc 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
index eabeff0..3fa1463 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
index a553d93..64b3e5f 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
index cb244cf..83b67f8 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
@@ -1,8 +1,8 @@
 set hive.auto.convert.join=true;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
index 0ddf378..33fe283 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.explain.user=false;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
index 9eb85d3..917aec9 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket1.q 
b/ql/src/test/queries/clientpositive/bucket1.q
index 0154b4e..6a59465 100644
--- a/ql/src/test/queries/clientpositive/bucket1.q
+++ b/ql/src/test/queries/clientpositive/bucket1.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 200;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket2.q 
b/ql/src/test/queries/clientpositive/bucket2.q
index ecd7e53..4e63859 100644
--- a/ql/src/test/queries/clientpositive/bucket2.q
+++ b/ql/src/test/queries/clientpositive/bucket2.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket3.q 
b/ql/src/test/queries/clientpositive/bucket3.q
index 7b7a9c3..b11e4da 100644
--- a/ql/src/test/queries/clientpositive/bucket3.q
+++ b/ql/src/test/queries/clientpositive/bucket3.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket4.q 
b/ql/src/test/queries/clientpositive/bucket4.q
index 1b49c7a..7cd962d 100644
--- a/ql/src/test/queries/clientpositive/bucket4.q
+++ b/ql/src/test/queries/clientpositive/bucket4.q
@@ -1,7 +1,7 @@
 set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY 
(key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket5.q 
b/ql/src/test/queries/clientpositive/bucket5.q
index 877f8a5..0b3bcc5 100644
--- a/ql/src/test/queries/clientpositive/bucket5.q
+++ b/ql/src/test/queries/clientpositive/bucket5.q
@@ -1,6 +1,6 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles = true;
 set hive.merge.mapredfiles = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket6.q 
b/ql/src/test/queries/clientpositive/bucket6.q
index fb55787..a12f6bd 100644
--- a/ql/src/test/queries/clientpositive/bucket6.q
+++ b/ql/src/test/queries/clientpositive/bucket6.q
@@ -1,7 +1,7 @@
 CREATE TABLE src_bucket(key STRING, value STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 2 BUCKETS;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 explain
 insert into table src_bucket select key,value from srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_many.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_many.q 
b/ql/src/test/queries/clientpositive/bucket_many.q
index 1f0b795..8a64ff1 100644
--- a/ql/src/test/queries/clientpositive/bucket_many.q
+++ b/ql/src/test/queries/clientpositive/bucket_many.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set mapred.reduce.tasks = 16;
 
 create table bucket_many(key int, value string) clustered by (key) into 256 
buckets;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_1.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
index 6bdb09e..deae460 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key, value) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_2.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
index 07f6d15..f416706 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
index 1ca20e4..4b75685 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS
@@ -17,8 +17,8 @@ select * from src where key < 10;
 insert overwrite table tbl3
 select * from src where key < 10;
 
-set hive.enforce.bucketing = false;
-set hive.enforce.sorting = false;
+;
+
 set hive.exec.reducers.max = 100;
 
 set hive.auto.convert.join=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 8546e78..40dad17 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' 
INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
index 2f968bd..1e7db5e 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' 
INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_num_reducers.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers.q 
b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
index 37ae6cc..06f334e 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set mapred.reduce.tasks = 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q 
b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
index 8c64d60..48e5f01 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set hive.exec.reducers.max = 2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketmapjoin13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin13.q 
b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
index f01c43e..fd2f22a 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin13.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY 
(part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketmapjoin6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin6.q 
b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
index a0ef371..9da0619 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin6.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
@@ -7,8 +7,8 @@ create table tmp1 (a string, b string) clustered by (a) sorted 
by (a) into 10 bu
 create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 
buckets;
 
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
index 8cc308f..8f8d625 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
index 9ecd2c4..a66378c 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
index 91e97de..6027707 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
index 623b22b..0f1e8c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
index 205a450..6f4becd 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
index a4e84f8..a609422 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
index f597884..b8370c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q 
b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
index 95a9a64..b34f8d1 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q 
b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index 096ae10..b906db2 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -1,7 +1,7 @@
 set hive.cbo.returnpath.hiveop=true;
 set hive.stats.fetch.column.stats=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/combine3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine3.q 
b/ql/src/test/queries/clientpositive/combine3.q
index c9afc91..9e5809b 100644
--- a/ql/src/test/queries/clientpositive/combine3.q
+++ b/ql/src/test/queries/clientpositive/combine3.q
@@ -20,7 +20,7 @@ desc extended combine_3_srcpart_seq_rc 
partition(ds="2010-08-03", hr="001");
 
 select key, value, ds, hr from combine_3_srcpart_seq_rc where ds="2010-08-03" 
order by key, hr limit 30;
 
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 drop table bucket3_1;

Reply via email to the dev@hive.apache.org mailing list.