Posted to commits@hive.apache.org by ha...@apache.org on 2015/11/25 02:09:48 UTC

[1/4] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Repository: hive
Updated Branches:
  refs/heads/master 1b6600de0 -> 5562fae73


http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
index eeb18b0..93a7ca4 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
@@ -139,6 +139,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -165,37 +167,14 @@ STAGE PLANS:
                         expressions: _col0 (type: int), _col7 (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=1/
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                SORTBUCKETCOLSPREFIX TRUE
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -249,6 +228,44 @@ STAGE PLANS:
                   name: default.test_table1
             Truncated Path -> Alias:
               /test_table1/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=1/
+                  Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        SORTBUCKETCOLSPREFIX TRUE
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator
@@ -406,6 +423,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -432,37 +451,14 @@ STAGE PLANS:
                         expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=2/
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                SORTBUCKETCOLSPREFIX TRUE
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -516,6 +512,44 @@ STAGE PLANS:
                   name: default.test_table3
             Truncated Path -> Alias:
               /test_table3/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=2/
+                  Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        SORTBUCKETCOLSPREFIX TRUE
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/spark/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats9.q.out b/ql/src/test/results/clientpositive/spark/stats9.q.out
index 7eae829..70175b2 100644
--- a/ql/src/test/results/clientpositive/spark/stats9.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats9.q.out
@@ -65,7 +65,7 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	10603               
 	totalSize           	11603               

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats9.q.out b/ql/src/test/results/clientpositive/stats9.q.out
index e7c7743..e00fc80 100644
--- a/ql/src/test/results/clientpositive/stats9.q.out
+++ b/ql/src/test/results/clientpositive/stats9.q.out
@@ -62,7 +62,7 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	10603               
 	totalSize           	11603               

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index bc46852..1156feb 100644
--- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -904,10 +904,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -947,10 +947,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1002,10 +1002,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1045,10 +1045,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1100,10 +1100,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1143,10 +1143,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1249,10 +1249,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1292,10 +1292,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 


[4/4] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by ha...@apache.org.
HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5562fae7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5562fae7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5562fae7

Branch: refs/heads/master
Commit: 5562fae73e417c81a193c1e6deb6388d3fef746b
Parents: 1b6600d
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Nov 24 17:08:35 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Nov 24 17:08:35 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   8 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   1 +
 .../apache/hadoop/hive/ql/exec/Utilities.java   |   3 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |  86 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  54 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |  14 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   6 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   5 +-
 .../queries/clientnegative/acid_overwrite.q     |   2 +-
 .../queries/clientnegative/archive_corrupt.q    |   2 +-
 .../authorization_delete_nodeletepriv.q         |   2 +-
 .../authorization_update_noupdatepriv.q         |   2 +-
 .../clientnegative/delete_non_acid_table.q      |   2 +-
 .../clientnegative/delete_not_bucketed.q        |   2 +-
 .../test/queries/clientnegative/delete_sorted.q |   2 +-
 .../test/queries/clientnegative/insert_sorted.q |   2 +-
 .../clientnegative/insert_values_sorted.q       |   2 +-
 .../queries/clientnegative/merge_negative_3.q   |   4 +-
 .../queries/clientnegative/smb_bucketmapjoin.q  |   4 +-
 .../queries/clientnegative/smb_mapjoin_14.q     |   4 +-
 .../sortmerge_mapjoin_mismatch_1.q              |   4 +-
 .../queries/clientnegative/update_bucket_col.q  |   2 +-
 .../clientnegative/update_no_such_table.q       |   2 +-
 .../clientnegative/update_non_acid_table.q      |   2 +-
 .../clientnegative/update_not_bucketed.q        |   2 +-
 .../clientnegative/update_partition_col.q       |   2 +-
 .../test/queries/clientnegative/update_sorted.q |   2 +-
 ql/src/test/queries/clientpositive/acid_join.q  |   2 +-
 .../queries/clientpositive/acid_vectorization.q |   2 +-
 .../acid_vectorization_partition.q              |   2 +-
 .../clientpositive/acid_vectorization_project.q |   2 +-
 .../alter_numbuckets_partitioned_table.q        |  59 --
 .../alter_numbuckets_partitioned_table2.q       |  85 --
 .../alter_numbuckets_partitioned_table2_h23.q   |   5 +-
 .../alter_numbuckets_partitioned_table_h23.q    |   2 +-
 .../clientpositive/archive_excludeHadoop20.q    |   2 +-
 .../test/queries/clientpositive/archive_multi.q |   2 +-
 .../clientpositive/authorization_delete.q       |   2 +-
 .../authorization_delete_own_table.q            |   2 +-
 .../clientpositive/authorization_update.q       |   2 +-
 .../authorization_update_own_table.q            |   2 +-
 .../clientpositive/auto_smb_mapjoin_14.q        |   4 +-
 .../clientpositive/auto_sortmerge_join_10.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_13.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_14.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_15.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_16.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_6.q      |   4 +-
 .../clientpositive/auto_sortmerge_join_9.q      |   4 +-
 ql/src/test/queries/clientpositive/bucket1.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket2.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket3.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket4.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket5.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket6.q    |   4 +-
 .../test/queries/clientpositive/bucket_many.q   |   2 +-
 .../queries/clientpositive/bucket_map_join_1.q  |   4 +-
 .../queries/clientpositive/bucket_map_join_2.q  |   4 +-
 .../clientpositive/bucket_map_join_spark4.q     |   8 +-
 .../clientpositive/bucket_map_join_tez1.q       |   4 +-
 .../clientpositive/bucket_map_join_tez2.q       |   4 +-
 .../clientpositive/bucket_num_reducers.q        |   2 +-
 .../clientpositive/bucket_num_reducers2.q       |   2 +-
 .../queries/clientpositive/bucketmapjoin13.q    |   4 +-
 .../queries/clientpositive/bucketmapjoin6.q     |   4 +-
 .../bucketsortoptimize_insert_1.q               |   4 +-
 .../bucketsortoptimize_insert_2.q               |   4 +-
 .../bucketsortoptimize_insert_3.q               |   4 +-
 .../bucketsortoptimize_insert_4.q               |   4 +-
 .../bucketsortoptimize_insert_5.q               |   4 +-
 .../bucketsortoptimize_insert_6.q               |   4 +-
 .../bucketsortoptimize_insert_7.q               |   4 +-
 .../bucketsortoptimize_insert_8.q               |   4 +-
 .../queries/clientpositive/cbo_rp_auto_join1.q  |   4 +-
 ql/src/test/queries/clientpositive/combine3.q   |   2 +-
 .../clientpositive/delete_all_non_partitioned.q |   2 +-
 .../clientpositive/delete_all_partitioned.q     |   2 +-
 .../queries/clientpositive/delete_orig_table.q  |   2 +-
 .../queries/clientpositive/delete_tmp_table.q   |   2 +-
 .../clientpositive/delete_where_no_match.q      |   2 +-
 .../delete_where_non_partitioned.q              |   2 +-
 .../clientpositive/delete_where_partitioned.q   |   2 +-
 .../clientpositive/delete_whole_partition.q     |   2 +-
 .../disable_merge_for_bucketing.q               |   2 +-
 .../clientpositive/dynpart_sort_opt_bucketing.q |   8 +-
 .../dynpart_sort_opt_vectorization.q            |   8 +-
 .../clientpositive/dynpart_sort_optimization.q  |   8 +-
 .../clientpositive/dynpart_sort_optimization2.q |   4 +-
 .../dynpart_sort_optimization_acid.q            |   2 +-
 .../encryption_insert_partition_dynamic.q       |   2 +-
 .../encryption_insert_partition_static.q        |   2 +-
 .../test/queries/clientpositive/enforce_order.q |   2 +-
 .../test/queries/clientpositive/explainuser_1.q |   6 +-
 .../test/queries/clientpositive/explainuser_2.q |   4 +-
 .../test/queries/clientpositive/explainuser_3.q |   6 +-
 .../queries/clientpositive/groupby_sort_1.q     |   4 +-
 .../queries/clientpositive/groupby_sort_10.q    |   4 +-
 .../queries/clientpositive/groupby_sort_11.q    |   4 +-
 .../queries/clientpositive/groupby_sort_1_23.q  |   4 +-
 .../queries/clientpositive/groupby_sort_2.q     |   4 +-
 .../queries/clientpositive/groupby_sort_3.q     |   4 +-
 .../queries/clientpositive/groupby_sort_4.q     |   4 +-
 .../queries/clientpositive/groupby_sort_5.q     |   4 +-
 .../queries/clientpositive/groupby_sort_6.q     |   4 +-
 .../queries/clientpositive/groupby_sort_7.q     |   4 +-
 .../queries/clientpositive/groupby_sort_8.q     |   4 +-
 .../queries/clientpositive/groupby_sort_9.q     |   4 +-
 .../clientpositive/groupby_sort_skew_1.q        |   4 +-
 .../clientpositive/groupby_sort_skew_1_23.q     |   4 +-
 .../clientpositive/groupby_sort_test_1.q        |   4 +-
 .../infer_bucket_sort_bucketed_table.q          |   4 +-
 .../infer_bucket_sort_map_operators.q           |   4 +-
 .../insert_acid_dynamic_partition.q             |   2 +-
 .../clientpositive/insert_acid_not_bucketed.q   |   2 +-
 .../clientpositive/insert_into_with_schema2.q   |   2 +-
 .../clientpositive/insert_nonacid_from_acid.q   |   2 +-
 .../queries/clientpositive/insert_orig_table.q  |   2 +-
 .../clientpositive/insert_update_delete.q       |   2 +-
 .../insert_values_acid_not_bucketed.q           |   2 +-
 .../insert_values_dynamic_partitioned.q         |   2 +-
 .../insert_values_non_partitioned.q             |   2 +-
 .../clientpositive/insert_values_orig_table.q   |   2 +-
 .../clientpositive/insert_values_partitioned.q  |   2 +-
 .../clientpositive/insert_values_tmp_table.q    |   2 +-
 .../clientpositive/insertoverwrite_bucket.q     |   4 +-
 .../test/queries/clientpositive/join_nullsafe.q |   4 +-
 .../queries/clientpositive/load_dyn_part2.q     |   2 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |   4 +-
 .../queries/clientpositive/orc_empty_files.q    |   2 +-
 .../partition_wise_fileformat14.q               |   4 +-
 .../test/queries/clientpositive/quotedid_smb.q  |   4 +-
 .../queries/clientpositive/reduce_deduplicate.q |   2 +-
 ql/src/test/queries/clientpositive/sample10.q   |   2 +-
 .../test/queries/clientpositive/smb_mapjoin9.q  |   4 +-
 .../queries/clientpositive/smb_mapjoin_11.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_12.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_13.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_14.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_15.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_16.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_17.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_18.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_19.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_20.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_21.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_22.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_25.q     |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_6.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_7.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_8.q |   4 +-
 .../clientpositive/sort_merge_join_desc_1.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_2.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_3.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_4.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_5.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_6.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_7.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_8.q     |   2 +-
 ql/src/test/queries/clientpositive/stats10.q    |   2 +-
 .../clientpositive/tez_bmj_schema_evolution.q   |   4 +-
 ql/src/test/queries/clientpositive/tez_fsstat.q |   4 +-
 ql/src/test/queries/clientpositive/tez_smb_1.q  |   4 +-
 .../test/queries/clientpositive/tez_smb_empty.q |   4 +-
 .../test/queries/clientpositive/tez_smb_main.q  |   4 +-
 .../queries/clientpositive/transform_acid.q     |   2 +-
 .../clientpositive/truncate_column_buckets.q    |   2 +-
 .../update_after_multiple_inserts.q             |   2 +-
 .../clientpositive/update_all_non_partitioned.q |   2 +-
 .../clientpositive/update_all_partitioned.q     |   2 +-
 .../queries/clientpositive/update_all_types.q   |   2 +-
 .../queries/clientpositive/update_orig_table.q  |   2 +-
 .../queries/clientpositive/update_tmp_table.q   |   2 +-
 .../queries/clientpositive/update_two_cols.q    |   2 +-
 .../clientpositive/update_where_no_match.q      |   2 +-
 .../update_where_non_partitioned.q              |   2 +-
 .../clientpositive/update_where_partitioned.q   |   2 +-
 .../clientpositive/vector_auto_smb_mapjoin_14.q |   4 +-
 .../test/queries/clientpositive/vector_bucket.q |   2 +-
 .../alter_numbuckets_partitioned_table.q.out    | 553 ------------
 .../alter_numbuckets_partitioned_table2.q.out   | 851 -------------------
 ...lter_numbuckets_partitioned_table2_h23.q.out |   6 +-
 ql/src/test/results/clientpositive/cp_sel.q.out |  81 +-
 .../clientpositive/index_auto_update.q.out      |   2 +-
 .../insert_into_with_schema2.q.out              |  12 +-
 .../results/clientpositive/orc_analyze.q.out    |  48 +-
 .../results/clientpositive/smb_mapjoin_11.q.out | 217 ++---
 .../clientpositive/spark/smb_mapjoin_11.q.out   |  74 +-
 .../clientpositive/spark/smb_mapjoin_12.q.out   | 154 ++--
 .../results/clientpositive/spark/stats9.q.out   |   2 +-
 ql/src/test/results/clientpositive/stats9.q.out |   2 +-
 .../clientpositive/tez/orc_analyze.q.out        |  48 +-
 191 files changed, 690 insertions(+), 2220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fffedd9..2bd850d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1198,12 +1198,8 @@ public class HiveConf extends Configuration {
         "The log level to use for tasks executing as part of the DAG.\n" +
         "Used only if hive.tez.java.opts is used to configure Java options."),
 
-    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
-        "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced."),
-    HIVEENFORCESORTING("hive.enforce.sorting", false,
-        "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced."),
     HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
-        "If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing \n" +
+        "Don't create a reducer for enforcing \n" +
         "bucketing/sorting for queries of the form: \n" +
         "insert overwrite table T2 select * from T1;\n" +
         "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
@@ -3082,9 +3078,7 @@ public class HiveConf extends Configuration {
     ConfVars.DROPIGNORESNONEXISTENT.varname,
     ConfVars.HIVECOUNTERGROUP.varname,
     ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
-    ConfVars.HIVEENFORCEBUCKETING.varname,
     ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
-    ConfVars.HIVEENFORCESORTING.varname,
     ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
     ConfVars.HIVEEXPREVALUATIONCACHE.varname,
     ConfVars.HIVEHASHTABLELOADFACTOR.varname,
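
With the two ConfVars gone, enforcement stops being a session-level decision and follows the table definition alone. A minimal HiveQL sketch of the user-visible difference (table and source names here are invented, not taken from this commit's tests):

    -- Formerly required before a bucketed/sorted insert:
    --   set hive.enforce.bucketing=true;
    --   set hive.enforce.sorting=true;
    -- After this commit the flags no longer exist; the DDL alone decides.
    CREATE TABLE demo_bucketed (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS;

    -- Always planned with an enforcing ReduceSink and 16 bucket files.
    INSERT OVERWRITE TABLE demo_bucketed SELECT key, value FROM demo_src;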

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 892587a..8a47605 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -423,6 +423,7 @@ public enum ErrorMsg {
   IMPORT_INTO_STRICT_REPL_TABLE(10303,"Non-repl import disallowed against table that is a destination of replication."),
   CTAS_LOCATION_NONEMPTY(10304, "CREATE-TABLE-AS-SELECT cannot create table with location to a non-empty directory."),
   CTAS_CREATES_VOID_TYPE(10305, "CREATE-TABLE-AS-SELECT creates a VOID type, please use CAST to specify the type, near field: "),
+  TBL_SORTED_NOT_BUCKETED(10306, "Destination table {0} found to be sorted but not bucketed.", true),
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
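
The new 10306 message is raised at compile time by SemanticAnalyzer (see its hunk further down) when a destination table's metadata carries sort columns but no buckets. Plain CREATE TABLE syntax cannot produce that state, but ALTER TABLE can; a hedged sketch, assuming NOT CLUSTERED leaves the sort metadata in place (names invented):

    CREATE TABLE sorted_only (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    ALTER TABLE sorted_only NOT CLUSTERED;

    INSERT OVERWRITE TABLE sorted_only SELECT key, value FROM demo_src;
    -- expected: SemanticException [Error 10306]: Destination table ...
    -- found to be sorted but not bucketed.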

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8b8cf6d..4eb46ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2159,8 +2159,7 @@ public final class Utilities {
       FileStatus[] items = fs.listStatus(path);
       taskIDToFile = removeTempOrDuplicateFiles(items, fs);
       if(taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null
-          && (conf.getTable().getNumBuckets() > taskIDToFile.size())
-          && (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+          && (conf.getTable().getNumBuckets() > taskIDToFile.size())) {
           // get the missing buckets and generate empty buckets for non-dynamic partition
         String taskID1 = taskIDToFile.keySet().iterator().next();
         Path bucketPath = taskIDToFile.values().iterator().next().getPath();
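
With the HIVEENFORCEBUCKETING guard removed, missing bucket files are now always backfilled with empty files for non-dynamic-partition inserts; that is what moves numFiles from 1 to 2 (stats9.q.out) and from 1 to 4 (tez/orc_analyze.q.out) elsewhere in this commit. A minimal sketch of the effect (invented table; the warehouse path depends on the installation):

    CREATE TABLE two_buckets (key INT) CLUSTERED BY (key) INTO 2 BUCKETS;
    -- All even keys hash to bucket 0, yet two files appear, one of them empty.
    INSERT INTO TABLE two_buckets VALUES (2), (4), (6);
    dfs -ls /user/hive/warehouse/two_buckets;   -- expect numFiles = 2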

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index e2a0eae..c3553a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -29,8 +29,6 @@ import java.util.Stack;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -76,8 +74,7 @@ import com.google.common.collect.Maps;
  * When dynamic partitioning (with or without bucketing and sorting) is enabled, this optimization
  * sorts the records on partition, bucket and sort columns respectively before inserting records
  * into the destination table. This enables reducers to keep only one record writer all the time
- * thereby reducing the the memory pressure on the reducers. This optimization will force a reducer
- * even when hive.enforce.bucketing and hive.enforce.sorting is set to false.
+ * thereby reducing the memory pressure on the reducers.
  */
 public class SortedDynPartitionOptimizer implements Transform {
 
@@ -270,58 +267,53 @@ public class SortedDynPartitionOptimizer implements Transform {
     // Remove RS and SEL introduced by enforce bucketing/sorting config
     // Convert PARENT -> RS -> SEL -> FS to PARENT -> FS
     private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) {
-      HiveConf hconf = parseCtx.getConf();
-      boolean enforceBucketing = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCEBUCKETING);
-      boolean enforceSorting = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCESORTING);
-      if (enforceBucketing || enforceSorting) {
-        Set<ReduceSinkOperator> reduceSinks = OperatorUtils.findOperatorsUpstream(fsOp,
-            ReduceSinkOperator.class);
-        Operator<? extends OperatorDesc> rsToRemove = null;
-        List<ReduceSinkOperator> rsOps = parseCtx
-            .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
-        boolean found = false;
-
-        // iterate through all RS and locate the one introduce by enforce bucketing
-        for (ReduceSinkOperator reduceSink : reduceSinks) {
-          for (ReduceSinkOperator rsOp : rsOps) {
-            if (reduceSink.equals(rsOp)) {
-              rsToRemove = reduceSink;
-              found = true;
-              break;
-            }
-          }
 
-          if (found) {
+      Set<ReduceSinkOperator> reduceSinks = OperatorUtils.findOperatorsUpstream(fsOp,
+          ReduceSinkOperator.class);
+      Operator<? extends OperatorDesc> rsToRemove = null;
+      List<ReduceSinkOperator> rsOps = parseCtx
+          .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
+      boolean found = false;
+
+      // iterate through all RS and locate the one introduced by enforce bucketing
+      for (ReduceSinkOperator reduceSink : reduceSinks) {
+        for (ReduceSinkOperator rsOp : rsOps) {
+          if (reduceSink.equals(rsOp)) {
+            rsToRemove = reduceSink;
+            found = true;
             break;
           }
         }
 
-        // iF RS is found remove it and its child (EX) and connect its parent
-        // and grand child
         if (found) {
-          Operator<? extends OperatorDesc> rsParent = rsToRemove.getParentOperators().get(0);
-          Operator<? extends OperatorDesc> rsChild = rsToRemove.getChildOperators().get(0);
-          Operator<? extends OperatorDesc> rsGrandChild = rsChild.getChildOperators().get(0);
-
-          if (rsChild instanceof SelectOperator) {
-            // if schema size cannot be matched, then it could be because of constant folding
-            // converting partition column expression to constant expression. The constant
-            // expression will then get pruned by column pruner since it will not reference to
-            // any columns.
-            if (rsParent.getSchema().getSignature().size() !=
-                rsChild.getSchema().getSignature().size()) {
-              return false;
-            }
-            rsParent.getChildOperators().clear();
-            rsParent.getChildOperators().add(rsGrandChild);
-            rsGrandChild.getParentOperators().clear();
-            rsGrandChild.getParentOperators().add(rsParent);
-            LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + rsChild.getOperatorId()
-                + " as it was introduced by enforce bucketing/sorting.");
-          }
+          break;
         }
       }
 
+      // If RS is found, remove it and its child (EX) and connect its parent
+      // and grandchild
+      if (found) {
+        Operator<? extends OperatorDesc> rsParent = rsToRemove.getParentOperators().get(0);
+        Operator<? extends OperatorDesc> rsChild = rsToRemove.getChildOperators().get(0);
+        Operator<? extends OperatorDesc> rsGrandChild = rsChild.getChildOperators().get(0);
+
+        if (rsChild instanceof SelectOperator) {
+          // if schema size cannot be matched, then it could be because of constant folding
+          // converting partition column expression to constant expression. The constant
+          // expression will then get pruned by column pruner since it will not reference to
+          // any columns.
+          if (rsParent.getSchema().getSignature().size() !=
+              rsChild.getSchema().getSignature().size()) {
+            return false;
+          }
+          rsParent.getChildOperators().clear();
+          rsParent.getChildOperators().add(rsGrandChild);
+          rsGrandChild.getParentOperators().clear();
+          rsGrandChild.getParentOperators().add(rsParent);
+          LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + rsChild.getOperatorId()
+              + " as it was introduced by enforce bucketing/sorting.");
+        }
+      }
       return true;
     }
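
The javadoc edit above also narrows this optimizer's contract: it still forces a reducer for the dynamic-partition sort itself, but no longer on behalf of the removed flags. A sketch of the kind of query it rewrites (the set commands are real config names; tables are invented):

    set hive.optimize.sort.dynamic.partition=true;
    set hive.exec.dynamic.partition.mode=nonstrict;

    CREATE TABLE dyn_target (key INT, value STRING)
    PARTITIONED BY (ds STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS;

    -- Rows are sorted on partition, bucket and sort columns, so each reducer
    -- keeps at most one record writer open at a time.
    INSERT OVERWRITE TABLE dyn_target PARTITION (ds)
    SELECT key, value, ds FROM demo_src_part;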
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 69bb9d7..1b7873d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6054,7 +6054,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // spray the data into multiple buckets. That way, we can support a very large
     // number of buckets without needing a very large number of reducers.
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnCols = new ArrayList<ExprNodeDesc>();
     ArrayList<ExprNodeDesc> sortCols = new ArrayList<ExprNodeDesc>();
     ArrayList<Integer> sortOrders = new ArrayList<Integer>();
@@ -6062,8 +6061,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     int numFiles = 1;
     int totalFiles = 1;
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if (dest_tab.getNumBuckets() > 0) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
@@ -6073,24 +6071,27 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
+        (dest_tab.getSortCols().size() > 0)) {
       sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
-      if (!enforceBucketing) {
-        partnCols = sortCols;
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
+      } else {
+        if (!enforceBucketing) {
+          partnCols = sortCols;
+        }
       }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
       if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) {
         maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
       }
       int numBuckets = dest_tab.getNumBuckets();
       if (numBuckets > maxReducers) {
-        LOG.debug("XXXXXX numBuckets is " + numBuckets + " and maxReducers is " + maxReducers);
+        LOG.debug("numBuckets is {} and maxReducers is {}", numBuckets, maxReducers);
         multiFileSpray = true;
         totalFiles = numBuckets;
         if (totalFiles % maxReducers == 0) {
@@ -6123,11 +6124,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private void genPartnCols(String dest, Operator input, QB qb,
       TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if ((dest_tab.getNumBuckets() > 0)) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
@@ -6138,15 +6137,19 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
-      if (!enforceBucketing) {
-        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+        (dest_tab.getSortCols().size() > 0)) {
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
       }
+      else {
+        if(!enforceBucketing) {
+          partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+        }
+      }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       ctx.setPartnCols(partnColsNoConvert);
     }
   }
@@ -6234,8 +6237,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         if (dpCtx.getSPPath() != null) {
           dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath());
         }
-        if ((dest_tab.getNumBuckets() > 0) &&
-            (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+        if ((dest_tab.getNumBuckets() > 0)) {
           dpCtx.setNumBuckets(dest_tab.getNumBuckets());
         }
       }
@@ -6542,12 +6544,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     RowSchema fsRS = new RowSchema(vecCol);
 
     // The output files of a FileSink can be merged if they are either not being written to a table
-    // or are being written to a table which is either not bucketed or enforce bucketing is not set
-    // and table the table is either not sorted or enforce sorting is not set
-    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0 &&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING)) ||
-        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0 &&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))));
+    // or are being written to a table which is not bucketed
+    // and not sorted
+    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0) ||
+        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0)));
 
     // If this table is working with ACID semantics, turn off merging
     canBeMerged &= !destTableIsAcid;
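
Two behavioral points fall out of the SemanticAnalyzer hunks: any bucketed destination now always gets enforcement, and a sorted destination implies it as well (enforceBucketing is set to true after the sort-column branch). The plan diffs for smb_mapjoin_12.q.out at the top of this mail show the result: a new Reducer 2 with TotalFiles: 16 and MultiFileSpray: true, with no enforce flag set anywhere. A compact way to observe it (invented names; the exact EXPLAIN shape varies by execution engine):

    CREATE TABLE demo_target (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;

    -- No 'set hive.enforce.*' lines: the plan still shows a Reduce Output
    -- Operator feeding a 16-file FileSink.
    EXPLAIN EXTENDED
    INSERT OVERWRITE TABLE demo_target SELECT key, value FROM demo_src;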

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index e13e6eb..db8b7d6 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -28,9 +28,9 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 /**
- * The LockManager is not ready, but for no-concurrency straight-line path we can 
+ * The LockManager is not ready, but for no-concurrency straight-line path we can
  * test AC=true, and AC=false with commit/rollback/exception and test resulting data.
- * 
+ *
  * Can also test, calling commit in AC=true mode, etc, toggling AC...
  */
 public class TestTxnCommands {
@@ -50,7 +50,7 @@ public class TestTxnCommands {
     ACIDTBL2("acidTbl2"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDORCTBL2("nonAcidOrcTbl2");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -70,7 +70,6 @@ public class TestTxnCommands {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -107,7 +106,7 @@ public class TestTxnCommands {
       FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
     }
   }
-  @Test 
+  @Test
   public void testInsertOverwrite() throws Exception {
     runStatementOnDriver("insert overwrite table " + Table.NONACIDORCTBL + " select a,b from " + Table.NONACIDORCTBL2);
     runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "3(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
@@ -211,7 +210,7 @@ public class TestTxnCommands {
     rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
     Assert.assertEquals("Can't see my own write", 1, rs0.size());
   }
-  @Test 
+  @Test
   public void testReadMyOwnInsert() throws Exception {
     runStatementOnDriver("set autocommit false");
     runStatementOnDriver("START TRANSACTION");
@@ -431,6 +430,7 @@ public class TestTxnCommands {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -462,7 +462,7 @@ public class TestTxnCommands {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 5aa2500..8616eb0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -62,7 +62,7 @@ public class TestTxnCommands2 {
     ACIDTBLPART("acidTblPart"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDPART("nonAcidPart");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -82,7 +82,6 @@ public class TestTxnCommands2 {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -330,6 +329,7 @@ public class TestTxnCommands2 {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -361,7 +361,7 @@ public class TestTxnCommands2 {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 44ad8b0..c6a7fcb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -51,7 +51,6 @@ public class TestDbTxnManager2 {
   public static void setUpClass() throws Exception {
     TxnDbUtil.setConfValues(conf);
     conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    conf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
   }
   @Before
   public void setUp() throws Exception {
@@ -211,8 +210,8 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("Unexpected number of locks found", 0, locks.size());
     checkCmdOnDriver(cpr);
   }
-  
-  
+
+
   private void checkLock(LockType type, LockState state, String db, String table, String partition, ShowLocksResponseElement l) {
     Assert.assertEquals(l.toString(),l.getType(), type);
     Assert.assertEquals(l.toString(),l.getState(), state);

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/acid_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/acid_overwrite.q b/ql/src/test/queries/clientnegative/acid_overwrite.q
index 2e57a3c..9ccf31e 100644
--- a/ql/src/test/queries/clientnegative/acid_overwrite.q
+++ b/ql/src/test/queries/clientnegative/acid_overwrite.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/archive_corrupt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_corrupt.q b/ql/src/test/queries/clientnegative/archive_corrupt.q
index 130b37b..ed49688 100644
--- a/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -1,7 +1,7 @@
 USE default;
 
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+
 
 drop table tstsrcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
index f2de306..28c256e 100644
--- a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
index c00c0eb..674ad1e 100644
--- a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_non_acid_table.q b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
index 6ae82ff..ec3d803 100644
--- a/ql/src/test/queries/clientnegative/delete_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table2(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_not_bucketed.q b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
index 80dffea..d575a8f 100644
--- a/ql/src/test/queries/clientnegative/delete_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/delete_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_sorted.q b/ql/src/test/queries/clientnegative/delete_sorted.q
index fd8d579..9f82c1f 100644
--- a/ql/src/test/queries/clientnegative/delete_sorted.q
+++ b/ql/src/test/queries/clientnegative/delete_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/insert_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_sorted.q b/ql/src/test/queries/clientnegative/insert_sorted.q
index 18c942a..cd1a69c 100644
--- a/ql/src/test/queries/clientnegative/insert_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
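
The sorted-table negative tests in this group (delete_sorted,
insert_sorted, insert_values_sorted, update_sorted) keep their purpose;
only the flag line is removed. The restriction they exercise is that
ACID insert/update/delete is rejected on tables declared with SORTED BY.
A sketch of the failing case, reusing the table defined above (the
inserted values are illustrative):

  -- Expected to fail: acid_insertsort is a transactional table with a
  -- SORTED BY clause, which ACID DML does not support.
  insert into acid_insertsort values (1, 'one');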
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/insert_values_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_values_sorted.q b/ql/src/test/queries/clientnegative/insert_values_sorted.q
index 260e2fb..ee26402 100644
--- a/ql/src/test/queries/clientnegative/insert_values_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_values_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/merge_negative_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/merge_negative_3.q b/ql/src/test/queries/clientnegative/merge_negative_3.q
index 6bc645e..f5eb231 100644
--- a/ql/src/test/queries/clientnegative/merge_negative_3.q
+++ b/ql/src/test/queries/clientnegative/merge_negative_3.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 create table srcpart2 (key int, value string) partitioned by (ds string) clustered by (key) sorted by (key) into 2 buckets stored as RCFILE;
 insert overwrite table srcpart2 partition (ds='2011') select * from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
index 880323c..c252d86 100644
--- a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
+++ b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
index 54bfba0..4c93542 100644
--- a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
index 7d11f45..8fbbd96 100644
--- a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
+++ b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
@@ -3,8 +3,8 @@ INTO 1 BUCKETS STORED AS RCFILE;
 create table table_desc(key int, value string) CLUSTERED BY (key) SORTED BY (key desc) 
 INTO 1 BUCKETS STORED AS RCFILE;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 
 insert overwrite table table_asc select key, value from src; 
 insert overwrite table table_desc select key, value from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_bucket_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_bucket_col.q b/ql/src/test/queries/clientnegative/update_bucket_col.q
index 515e024..c471a4c 100644
--- a/ql/src/test/queries/clientnegative/update_bucket_col.q
+++ b/ql/src/test/queries/clientnegative/update_bucket_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
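
update_bucket_col likewise keeps verifying that an UPDATE may not assign
to a bucketing column; the patch only drops the flag line. The statement
the test expects to fail would look like this (values are illustrative):

  -- Expected to fail: 'a' is the bucketing column of foo, and updating
  -- it would move rows between buckets.
  update foo set a = 5 where b = 'x';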
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_no_such_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_no_such_table.q b/ql/src/test/queries/clientnegative/update_no_such_table.q
index 07239cf..dffbab4 100644
--- a/ql/src/test/queries/clientnegative/update_no_such_table.q
+++ b/ql/src/test/queries/clientnegative/update_no_such_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 update no_such_table set b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_non_acid_table.q b/ql/src/test/queries/clientnegative/update_non_acid_table.q
index dd0b01e..da46141 100644
--- a/ql/src/test/queries/clientnegative/update_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/update_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_not_bucketed.q b/ql/src/test/queries/clientnegative/update_not_bucketed.q
index 8512fa7..d7d0da4 100644
--- a/ql/src/test/queries/clientnegative/update_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/update_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds string) stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_partition_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_partition_col.q b/ql/src/test/queries/clientnegative/update_partition_col.q
index e9c60cc..78d381e 100644
--- a/ql/src/test/queries/clientnegative/update_partition_col.q
+++ b/ql/src/test/queries/clientnegative/update_partition_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientnegative/update_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_sorted.q b/ql/src/test/queries/clientnegative/update_sorted.q
index 917c3b5..f9e5db5 100644
--- a/ql/src/test/queries/clientnegative/update_sorted.q
+++ b/ql/src/test/queries/clientnegative/update_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_join.q b/ql/src/test/queries/clientpositive/acid_join.q
index 2e6aeae..dca4d7d 100644
--- a/ql/src/test/queries/clientpositive/acid_join.q
+++ b/ql/src/test/queries/clientpositive/acid_join.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- This test checks that a join between tables with two different bucket counts sends the right bucket info to each table.
 create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true"); 
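
A sketch of the shape of that join; the second table and its bucket
count are assumptions for illustration, not taken from this patch:

  create table acidjoin2(name varchar(50), age int) clustered by (age)
  into 3 buckets stored as orc TBLPROPERTIES ("transactional"="true");

  -- Each side carries its own bucket count (2 vs. 3), so the readers
  -- must resolve bucket ids per table instead of assuming one shared
  -- bucketing scheme.
  select a.name, b.age from acidjoin1 a join acidjoin2 b on a.age = b.age;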

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization.q b/ql/src/test/queries/clientpositive/acid_vectorization.q
index 4b11412..514d3fa 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
index 00449bb..8dd1e09 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized_part(a INT, b STRING) partitioned by (ds string) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/acid_vectorization_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_project.q b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
index a44b57a..2a5f59a 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_project.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
deleted file mode 100644
index 627fcc1..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
+++ /dev/null
@@ -1,59 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
-
-alter table tst1 clustered by (key) into 8 buckets;
-
-describe formatted tst1;
-
-set hive.enforce.bucketing=true;
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets;
-
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
-describe formatted tst1;
-
--- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
-alter table tst1 partition (ds = '1') into 6 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing sort order
-
-alter table tst1 clustered by (value) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing buckets
-
-alter table tst1 not clustered;
-
-describe formatted tst1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
deleted file mode 100644
index 2f26de8..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
+++ /dev/null
@@ -1,85 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
-
-DESCRIBE FORMATTED tst1;
-
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
index 2c2e184..15a88bb 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -1,4 +1,3 @@
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
@@ -6,8 +5,8 @@ CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
 
 DESCRIBE FORMATTED tst1;
 
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
+
+
 INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
 
 DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
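
The h23 variant keeps the same flow with the flag lines dropped. The
property it verifies, in its own statements, is that overwriting a
partition after an ALTER refreshes the partition-level metadata:

  ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
  INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
  DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
  -- The partition's storage section now reports Num Buckets: 8 and
  -- Bucket Columns: [key], matching the altered table definition.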

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 439f351..7d523d9 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -5,7 +5,7 @@ alter table tst1 clustered by (key) into 8 buckets;
 
 describe formatted tst1;
 
-set hive.enforce.bucketing=true;
+
 insert overwrite table tst1 partition (ds='1') select key, value from src;
 
 describe formatted tst1 partition (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
index 316276a..b046f97 100644
--- a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
+++ b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.submitviachild=true;
 set hive.exec.submit.local.task.via.child=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/archive_multi.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_multi.q b/ql/src/test/queries/clientpositive/archive_multi.q
index 1004aca..0259a3e 100644
--- a/ql/src/test/queries/clientpositive/archive_multi.q
+++ b/ql/src/test/queries/clientpositive/archive_multi.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+;
 
 create database ac_test;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_delete.q b/ql/src/test/queries/clientpositive/authorization_delete.q
index d96e6ab..fe1a9ac 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete.q
@@ -4,7 +4,7 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
index 7abdc12..34dfa6a 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_update.q b/ql/src/test/queries/clientpositive/authorization_update.q
index da1054e..5e57904 100644
--- a/ql/src/test/queries/clientpositive/authorization_update.q
+++ b/ql/src/test/queries/clientpositive/authorization_update.q
@@ -4,7 +4,7 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/authorization_update_own_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_update_own_table.q b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
index ace1ce2..e3292d2 100644
--- a/ql/src/test/queries/clientpositive/authorization_update_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
index 699777e..4dca15b 100644
--- a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
index c07dd23..77b2282 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
index f35fec1..1c868dc 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
index eabeff0..3fa1463 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
index a553d93..64b3e5f 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
index cb244cf..83b67f8 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
@@ -1,8 +1,8 @@
 set hive.auto.convert.join=true;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
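
With the enforce flags gone, the settings that remain in this test are
the ones that actually drive the sort-merge bucket map join. A minimal
sketch of a query they enable, with table definitions following the
tbl1/tbl2 pattern used elsewhere in these tests:

  set hive.auto.convert.join=true;
  set hive.auto.convert.sortmerge.join=true;
  set hive.optimize.bucketmapjoin = true;

  -- Both sides are bucketed and sorted on the join key with the same
  -- bucket count, so the join can run as an SMB map join.
  CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
  CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
  select a.key, b.value from tbl1 a join tbl2 b on a.key = b.key;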

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
index 0ddf378..33fe283 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.explain.user=false;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
index 9eb85d3..917aec9 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket1.q b/ql/src/test/queries/clientpositive/bucket1.q
index 0154b4e..6a59465 100644
--- a/ql/src/test/queries/clientpositive/bucket1.q
+++ b/ql/src/test/queries/clientpositive/bucket1.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 200;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket2.q b/ql/src/test/queries/clientpositive/bucket2.q
index ecd7e53..4e63859 100644
--- a/ql/src/test/queries/clientpositive/bucket2.q
+++ b/ql/src/test/queries/clientpositive/bucket2.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket3.q b/ql/src/test/queries/clientpositive/bucket3.q
index 7b7a9c3..b11e4da 100644
--- a/ql/src/test/queries/clientpositive/bucket3.q
+++ b/ql/src/test/queries/clientpositive/bucket3.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket4.q b/ql/src/test/queries/clientpositive/bucket4.q
index 1b49c7a..7cd962d 100644
--- a/ql/src/test/queries/clientpositive/bucket4.q
+++ b/ql/src/test/queries/clientpositive/bucket4.q
@@ -1,7 +1,7 @@
 set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket5.q b/ql/src/test/queries/clientpositive/bucket5.q
index 877f8a5..0b3bcc5 100644
--- a/ql/src/test/queries/clientpositive/bucket5.q
+++ b/ql/src/test/queries/clientpositive/bucket5.q
@@ -1,6 +1,6 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles = true;
 set hive.merge.mapredfiles = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket6.q b/ql/src/test/queries/clientpositive/bucket6.q
index fb55787..a12f6bd 100644
--- a/ql/src/test/queries/clientpositive/bucket6.q
+++ b/ql/src/test/queries/clientpositive/bucket6.q
@@ -1,7 +1,7 @@
 CREATE TABLE src_bucket(key STRING, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 explain
 insert into table src_bucket select key,value from srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_many.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_many.q b/ql/src/test/queries/clientpositive/bucket_many.q
index 1f0b795..8a64ff1 100644
--- a/ql/src/test/queries/clientpositive/bucket_many.q
+++ b/ql/src/test/queries/clientpositive/bucket_many.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set mapred.reduce.tasks = 16;
 
 create table bucket_many(key int, value string) clustered by (key) into 256 buckets;
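
bucket_many pairs 256 buckets with only 16 reduce tasks: the scenario is
writing more buckets than there are reducers, where each reducer emits
several correctly hashed bucket files. The insert below is illustrative;
the set and create statements are the test's own:

  set mapred.reduce.tasks = 16;
  create table bucket_many(key int, value string) clustered by (key) into 256 buckets;

  -- 16 reducers together produce 256 bucket files.
  insert overwrite table bucket_many select * from src;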

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_1.q b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
index 6bdb09e..deae460 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key, value) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_2.q b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
index 07f6d15..f416706 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
index 1ca20e4..4b75685 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS
@@ -17,8 +17,8 @@ select * from src where key < 10;
 insert overwrite table tbl3
 select * from src where key < 10;
 
-set hive.enforce.bucketing = false;
-set hive.enforce.sorting = false;
+;
+
 set hive.exec.reducers.max = 100;
 
 set hive.auto.convert.join=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 8546e78..40dad17 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
index 2f968bd..1e7db5e 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_num_reducers.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers.q b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
index 37ae6cc..06f334e 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set mapred.reduce.tasks = 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
index 8c64d60..48e5f01 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set hive.exec.reducers.max = 2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketmapjoin13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin13.q b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
index f01c43e..fd2f22a 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin13.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketmapjoin6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin6.q b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
index a0ef371..9da0619 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin6.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
@@ -7,8 +7,8 @@ create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 bu
 create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
 
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
index 8cc308f..8f8d625 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
index 9ecd2c4..a66378c 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
index 91e97de..6027707 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
index 623b22b..0f1e8c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
index 205a450..6f4becd 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
index a4e84f8..a609422 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
index f597884..b8370c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
index 95a9a64..b34f8d1 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index 096ae10..b906db2 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -1,7 +1,7 @@
 set hive.cbo.returnpath.hiveop=true;
 set hive.stats.fetch.column.stats=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/combine3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine3.q b/ql/src/test/queries/clientpositive/combine3.q
index c9afc91..9e5809b 100644
--- a/ql/src/test/queries/clientpositive/combine3.q
+++ b/ql/src/test/queries/clientpositive/combine3.q
@@ -20,7 +20,7 @@ desc extended combine_3_srcpart_seq_rc partition(ds="2010-08-03", hr="001");
 
 select key, value, ds, hr from combine_3_srcpart_seq_rc where ds="2010-08-03" order by key, hr limit 30;
 
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 drop table bucket3_1;


[2/4] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
deleted file mode 100644
index b1dfd7c..0000000
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
+++ /dev/null
@@ -1,553 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst1
-PREHOOK: query: alter table tst1 clustered by (key) into 8 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: alter table tst1 clustered by (key) into 8 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets
-PREHOOK: type: ALTERTABLE_BUCKETNUM
-PREHOOK: Input: default@tst1
-POSTHOOK: query: -- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets
-POSTHOOK: type: ALTERTABLE_BUCKETNUM
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
-PREHOOK: type: ALTERPARTITION_BUCKETNUM
-PREHOOK: Input: default@tst1
-POSTHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
-POSTHOOK: type: ALTERPARTITION_BUCKETNUM
-POSTHOOK: Input: default@tst1
-POSTHOOK: Input: default@tst1@ds=1
-POSTHOOK: Output: default@tst1@ds=1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	false               
-#### A masked pattern was here ####
-	numFiles            	1                   
-	numRows             	-1                  
-	rawDataSize         	-1                  
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	6                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test removing sort order
-
-alter table tst1 clustered by (value) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test removing sort order
-
-alter table tst1 clustered by (value) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test removing buckets
-
-alter table tst1 not clustered
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test removing buckets
-
-alter table tst1 not clustered
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

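[Editor's note] The deleted golden output above walks through every ALTER TABLE bucketing/sorting form against the partitioned table tst1. As a plain recap, reconstructed only from the PREHOOK/POSTHOOK queries visible above:

    -- tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
    ALTER TABLE tst1 CLUSTERED BY (key) INTO 12 BUCKETS;                         -- change bucket count
    ALTER TABLE tst1 INTO 4 BUCKETS;                                             -- change count only, keep bucket columns
    ALTER TABLE tst1 PARTITION (ds = '1') INTO 6 BUCKETS;                        -- per-partition bucket count
    ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key ASC) INTO 12 BUCKETS;     -- add a sort order
    ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (value DESC) INTO 12 BUCKETS;  -- change the sort order
    ALTER TABLE tst1 CLUSTERED BY (value) INTO 12 BUCKETS;                       -- drop the sort order
    ALTER TABLE tst1 NOT CLUSTERED;                                              -- drop bucketing entirely

Each statement is followed in the output by a DESCRIBE FORMATTED confirming the metadata change (Num Buckets, Bucket Columns, Sort Columns). Two details worth noting: the partition-level alter leaves the table at 4 buckets while the ds='1' partition reports 6, and it flips the partition's COLUMN_STATS_ACCURATE flag to false (numRows and rawDataSize become -1).
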
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
deleted file mode 100644
index e5f8e7f..0000000
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
+++ /dev/null
@@ -1,851 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

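[Editor's note] The file deleted above, alter_numbuckets_partitioned_table2.q.out, repeats one pattern for each layout variation (unbucketed to bucketed, unsorted to sorted, changed columns, counts, and orders, and back): alter the table's metadata, overwrite the partition, then verify the partition inherited the new layout. One round trip, using only statements that appear in the output, looks like:

    ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
    INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
    DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
    -- Expected Storage Information for the partition after the overwrite:
    --   Num Buckets:    8
    --   Bucket Columns: [key]
    --   Sort Columns:   [Order(col:key, order:0)]   -- order:0 = DESC, order:1 = ASC
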
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
index f919f10..29a4c4b 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
@@ -1,13 +1,11 @@
-PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
+PREHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
 CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@tst1
-POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
+POSTHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
 CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)

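[Editor's note] The hunk above only strips the EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) marker from the test's leading comment; the golden output itself is unchanged. These markers (compare INCLUDE_HADOOP_MAJOR_VERSIONS in the deleted file above) are magic comments at the top of a .q file that gate which Hadoop versions a qtest runs on, e.g.:

    -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
    CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);

With the version-specific variant of this test deleted, the remaining _h23 golden file presumably no longer needs the gate.
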
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/cp_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out
index a2c9fe0..a55b28d 100644
--- a/ql/src/test/results/clientpositive/cp_sel.q.out
+++ b/ql/src/test/results/clientpositive/cp_sel.q.out
@@ -75,13 +75,8 @@ insert overwrite table testpartbucket partition(ds,hr) select key,value,'hello'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-0 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -91,26 +86,28 @@ STAGE PLANS:
             alias: srcpart
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string), 'hello' (type: string), 'world' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.testpartbucket
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
+                value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), 'hello' (type: string), 'world' (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.testpartbucket
 
   Stage: Stage-0
     Move Operator
@@ -128,36 +125,6 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.testpartbucket
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.testpartbucket
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table testpartbucket partition(ds,hr) select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -182,9 +149,9 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testpartbucket
 POSTHOOK: Input: default@testpartbucket@ds=hello/hr=world
 #### A masked pattern was here ####
-238	val_238	hello	world
-86	val_86	hello	world
-311	val_311	hello	world
+0	val_0	hello	world
+0	val_0	hello	world
+0	val_0	hello	world
 PREHOOK: query: drop table testpartbucket
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@testpartbucket
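
The cp_sel plan change above shows the central effect of HIVE-12331: with bucketing enforcement now unconditional, the insert into the bucketed testpartbucket table compiles to a single map-reduce job whose reduce side writes the bucket files, so the old conditional merge stages (Stage-3 through Stage-7) disappear from the plan. A minimal sketch of the pattern, with the insert statement taken verbatim from this test; the DDL is illustrative and the bucket count is an assumption, since the table definition is not part of this excerpt:

set hive.exec.dynamic.partition.mode=nonstrict;   -- both ds and hr are dynamic partitions here

create table testpartbucket (key string, value string)
  partitioned by (ds string, hr string)
  clustered by (key) into 3 buckets;              -- bucket count assumed for illustration

insert overwrite table testpartbucket partition(ds,hr)
select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11;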

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out
index 11af3f5..c85c80f 100644
--- a/ql/src/test/results/clientpositive/index_auto_update.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_update.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Map-reduce partition columns: _col0 (type: string)
                   value expressions: _col2 (type: array<bigint>)
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
index 32e6e92..5d44d27 100644
--- a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
@@ -1,8 +1,16 @@
-PREHOOK: query: create table studenttab10k (age2 int)
+PREHOOK: query: -- SORT_QUERY_RESULTS;
+
+
+
+create table studenttab10k (age2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@studenttab10k
-POSTHOOK: query: create table studenttab10k (age2 int)
+POSTHOOK: query: -- SORT_QUERY_RESULTS;
+
+
+
+create table studenttab10k (age2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@studenttab10k
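
The insert_into_with_schema2 hunk shows the companion fix for order-sensitive golden files: once inserts always route through the bucketing reducer, row order in query output is no longer stable, so the test opts into sorted comparison. The marker is just a leading comment in the .q file, as the query-file diff later in this mail confirms:

-- SORT_QUERY_RESULTS;

create table studenttab10k (age2 int);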

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out
index bc46852..1156feb 100644
--- a/ql/src/test/results/clientpositive/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/orc_analyze.q.out
@@ -904,10 +904,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -947,10 +947,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1002,10 +1002,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1045,10 +1045,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1100,10 +1100,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1143,10 +1143,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1249,10 +1249,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1292,10 +1292,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
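
In the orc_analyze output, numFiles rising from 1 to 4 in each partition is consistent with the partition now being written as one file per bucket, and rawDataSize/totalSize shift because the same 50 rows are spread across four smaller ORC files. A sketch of how these parameters could be inspected after an analyze; the partition column and value are assumptions (they are not visible in this excerpt):

analyze table orc_create_people partition(state) compute statistics;
describe formatted orc_create_people partition(state='Ca');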

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
index 7b54dbe..e159f5e 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
@@ -148,35 +148,12 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: int), _col7 (type: string)
                   outputColumnNames: _col0, _col1
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 1
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Static Partition Specification: ds=1/
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          bucket_count 16
-                          bucket_field_name key
-                          columns key,value
-                          columns.comments 
-                          columns.types int:string
-#### A masked pattern was here ####
-                          name default.test_table3
-                          partition_columns ds
-                          partition_columns.types string
-                          serialization.ddl struct test_table3 { i32 key, string value}
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.test_table3
-                    TotalFiles: 1
-                    GatherStats: true
-                    MultiFileSpray: false
+                  Reduce Output Operator
+                    sort order: 
+                    Map-reduce partition columns: _col0 (type: int)
+                    tag: -1
+                    value expressions: _col0 (type: int), _col1 (type: string)
+                    auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -230,6 +207,40 @@ STAGE PLANS:
             name: default.test_table1
       Truncated Path -> Alias:
         /test_table1/ds=1 [a]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          File Output Operator
+            compressed: false
+            GlobalTableId: 1
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 16
+            Static Partition Specification: ds=1/
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  bucket_count 16
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types int:string
+#### A masked pattern was here ####
+                  name default.test_table3
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct test_table3 { i32 key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.test_table3
+            TotalFiles: 16
+            GatherStats: true
+            MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator
@@ -2023,7 +2034,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: (ds%3D1)000001_0
+            base file name: 000001_0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -2069,7 +2080,7 @@ STAGE PLANS:
               name: default.test_table3
             name: default.test_table3
       Truncated Path -> Alias:
-        /test_table3/ds=1/(ds%3D1)000001_0 [test_table3]
+        /test_table3/ds=1/000001_0 [test_table3]
 
   Stage: Stage-0
     Fetch Operator
@@ -2138,66 +2149,18 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table3
 POSTHOOK: Input: default@test_table3@ds=1
 #### A masked pattern was here ####
-17	val_17	1
-33	val_33	1
-65	val_65	1
-97	val_97	1
-97	val_97	1
-97	val_97	1
-97	val_97	1
-113	val_113	1
-113	val_113	1
-113	val_113	1
-113	val_113	1
-129	val_129	1
-129	val_129	1
-129	val_129	1
-129	val_129	1
-145	val_145	1
-177	val_177	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-209	val_209	1
-209	val_209	1
-209	val_209	1
-209	val_209	1
-241	val_241	1
-257	val_257	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-289	val_289	1
-305	val_305	1
-321	val_321	1
-321	val_321	1
-321	val_321	1
-321	val_321	1
-353	val_353	1
-353	val_353	1
-353	val_353	1
-353	val_353	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
+497	val_497	1
+481	val_481	1
+449	val_449	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
 401	val_401	1
 401	val_401	1
 401	val_401	1
@@ -2223,18 +2186,66 @@ POSTHOOK: Input: default@test_table3@ds=1
 401	val_401	1
 401	val_401	1
 401	val_401	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-449	val_449	1
-481	val_481	1
-497	val_497	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+353	val_353	1
+353	val_353	1
+353	val_353	1
+353	val_353	1
+321	val_321	1
+321	val_321	1
+321	val_321	1
+321	val_321	1
+305	val_305	1
+289	val_289	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+257	val_257	1
+241	val_241	1
+209	val_209	1
+209	val_209	1
+209	val_209	1
+209	val_209	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+177	val_177	1
+145	val_145	1
+129	val_129	1
+129	val_129	1
+129	val_129	1
+129	val_129	1
+113	val_113	1
+113	val_113	1
+113	val_113	1
+113	val_113	1
+97	val_97	1
+97	val_97	1
+97	val_97	1
+97	val_97	1
+65	val_65	1
+33	val_33	1
+17	val_17	1
 PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed
 SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
 PREHOOK: type: QUERY
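
The long run of reordered rows above is the same result set as before; only the order changed. Per the plan earlier in this hunk (NumFilesPerFileSink: 16, MultiFileSpray: true), the reducer now sprays rows across 16 bucket files, and an unordered SELECT returns them in whatever order those files are scanned. These tests keep their golden files as-is, but a sketch of how such a query could be made order-insensitive, using the columns this test defines:

select * from test_table3 where ds = '1' order by key, value;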

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
index 4d912ca..af885b9 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
@@ -127,6 +127,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -153,36 +155,13 @@ STAGE PLANS:
                         expressions: _col0 (type: int), _col7 (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=1/
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col0 (type: int), _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -236,6 +215,43 @@ STAGE PLANS:
                   name: default.test_table1
             Truncated Path -> Alias:
               /test_table1/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=1/
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator


[3/4] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
index 9110dcc..82c18e2 100644
--- a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_all_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_all_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_partitioned.q
index f082b6d..122b3e2 100644
--- a/ql/src/test/queries/clientpositive/delete_all_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_all_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_orig_table.q b/ql/src/test/queries/clientpositive/delete_orig_table.q
index fd23f4b..88cc830 100644
--- a/ql/src/test/queries/clientpositive/delete_orig_table.q
+++ b/ql/src/test/queries/clientpositive/delete_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table;
 dfs -copyFromLocal ../../data/files/alltypesorc ${system:test.tmp.dir}/delete_orig_table/00000_0; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_tmp_table.q b/ql/src/test/queries/clientpositive/delete_tmp_table.q
index eb6c095..c7d8aa6 100644
--- a/ql/src/test/queries/clientpositive/delete_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/delete_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_where_no_match.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_no_match.q b/ql/src/test/queries/clientpositive/delete_where_no_match.q
index 8ed979d..f13dd73 100644
--- a/ql/src/test/queries/clientpositive/delete_where_no_match.q
+++ b/ql/src/test/queries/clientpositive/delete_where_no_match.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
index dac5375..de1ca36 100644
--- a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_where_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_partitioned.q
index f84f26a..2fb950f 100644
--- a/ql/src/test/queries/clientpositive/delete_where_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_where_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/delete_whole_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_whole_partition.q b/ql/src/test/queries/clientpositive/delete_whole_partition.q
index 8228a32..3d6c1e5 100644
--- a/ql/src/test/queries/clientpositive/delete_whole_partition.q
+++ b/ql/src/test/queries/clientpositive/delete_whole_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
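
For the delete_* ACID tests in this part, dropping the set line is the whole change: ACID tables are already required by their DDL to be clustered into buckets, and after HIVE-12331 that bucketing is honored unconditionally. The surviving shape, with the DDL verbatim from the hunk above and an illustrative delete statement (the actual statements live in the untouched body of each test):

set hive.support.concurrency=true;
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
delete from acid_dwhp where ds = '2008-04-08';   -- illustrative predicate, not from this excerpt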
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
index d7f9ac8..c67426f 100644
--- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
+++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
index 95ae6e3..91fe7c5 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/sortdp.txt' overwrite into table t1_sta
 
 set hive.optimize.sort.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.sorting=true;
-set hive.enforce.bucketing=true;
+
+
 
 drop table t1;
 
@@ -44,8 +44,8 @@ dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0;
 
 set hive.optimize.sort.dynamic.partition=false;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.sorting=true;
-set hive.enforce.bucketing=true;
+
+
 
 -- disable sorted dynamic partition optimization to make sure the results are correct
 drop table t1;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 3d0cdcd..422b711 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -5,8 +5,8 @@ set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 create table over1k(
            t tinyint,
@@ -67,8 +67,8 @@ insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,
 insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
 insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
 explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;
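
The dynpart_sort_* tests used these configs to contrast unenforced and enforced insert plans around hive.optimize.sort.dynamic.partition; with the configs gone, both halves of each test now run with bucketing and sorting always enforced, so only the toggles are deleted and the inserts themselves are unchanged. The surviving shape of the first half, with every line taken verbatim from the hunk above:

set hive.optimize.sort.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;

insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;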

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index a1a87d8..2d21f32 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -4,8 +4,8 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 create table over1k(
            t tinyint,
@@ -61,8 +61,8 @@ insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,
 insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
 insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
 explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
index c18f1cc..8fd79d6 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
@@ -4,8 +4,8 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
index a2f2c77..d1d1851 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 set hive.optimize.sort.dynamic.partition=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
index 9556eed..371fd75 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
index 69687df..b52e740 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/enforce_order.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/enforce_order.q b/ql/src/test/queries/clientpositive/enforce_order.q
index 6a303c3..da18684 100644
--- a/ql/src/test/queries/clientpositive/enforce_order.q
+++ b/ql/src/test/queries/clientpositive/enforce_order.q
@@ -1,7 +1,7 @@
 drop table table_asc;
 drop table table_desc;
 
-set hive.enforce.sorting = true;
+
 
 create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS;
 create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/explainuser_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index 7ae1ed6..785bb07 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -293,7 +293,7 @@ create table if not exists nzhang_ctas3 as select key, value from src sort by ke
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 explain create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
@@ -334,8 +334,8 @@ CREATE TABLE smb_input(key int, value int);
 LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
 LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/explainuser_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q
index 560a086..da107dc 100644
--- a/ql/src/test/queries/clientpositive/explainuser_2.q
+++ b/ql/src/test/queries/clientpositive/explainuser_2.q
@@ -174,8 +174,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/explainuser_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q
index f604d38..dd86b76 100644
--- a/ql/src/test/queries/clientpositive/explainuser_3.q
+++ b/ql/src/test/queries/clientpositive/explainuser_3.q
@@ -2,7 +2,7 @@ set hive.explain.user=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
@@ -140,8 +140,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q b/ql/src/test/queries/clientpositive/groupby_sort_1.q
index ed888bb..4909f16 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_10.q b/ql/src/test/queries/clientpositive/groupby_sort_10.q
index b3ddd42..4c650f9 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_10.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_10.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_11.q b/ql/src/test/queries/clientpositive/groupby_sort_11.q
index 19063f6..32a9658 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_11.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_11.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
index a6e18c7..d81e190 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_2.q b/ql/src/test/queries/clientpositive/groupby_sort_2.q
index 1574048..8e5a82e 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_2.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_2.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_3.q b/ql/src/test/queries/clientpositive/groupby_sort_3.q
index b835f95..1686170 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_3.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_3.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_4.q b/ql/src/test/queries/clientpositive/groupby_sort_4.q
index a61c551..db1a884 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_4.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_4.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_5.q b/ql/src/test/queries/clientpositive/groupby_sort_5.q
index 0d4ba42..98eed1f 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_5.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_5.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_6.q b/ql/src/test/queries/clientpositive/groupby_sort_6.q
index 752b927..2ed58d8 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_6.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_7.q b/ql/src/test/queries/clientpositive/groupby_sort_7.q
index 3e3ba7a..e8cc047 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_7.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_8.q b/ql/src/test/queries/clientpositive/groupby_sort_8.q
index f0d3a59..98f363d 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_8.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_8.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_9.q b/ql/src/test/queries/clientpositive/groupby_sort_9.q
index 296336d..eadcbb8 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_9.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_9.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
index 76a1725..9a7104d 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
index 1b24aec..0a94b3a 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
index 70eef33..faf5c99 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
index d69f49f..72682c5 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.infer.bucket.sort=true;
 
 -- Test writing to a bucketed table, the output should be bucketed by the bucketing key into the

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
index 16e8715..becbc9d 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
@@ -1,6 +1,6 @@
 set hive.exec.infer.bucket.sort=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 -- This tests inferring how data is bucketed/sorted from the operators in the reducer
 -- and populating that information in partitions' metadata, in particular, this tests

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
index c544589..62cc2f9 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
index a29b1e7..9e61fd7 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
index a5352ec..0f21289 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
@@ -1,6 +1,6 @@
 -- SORT_QUERY_RESULTS;
 
-set hive.enforce.bucketing=true;
+
 
 create table studenttab10k (age2 int);
 insert into studenttab10k values(1);

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
index 639cb31..10a1d68 100644
--- a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
+++ b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_orig_table.q b/ql/src/test/queries/clientpositive/insert_orig_table.q
index c38bd5a..a969d1b 100644
--- a/ql/src/test/queries/clientpositive/insert_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_iot(
     ctinyint TINYINT,

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_update_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_update_delete.q b/ql/src/test/queries/clientpositive/insert_update_delete.q
index 8dbb77c..170a18f 100644
--- a/ql/src/test/queries/clientpositive/insert_update_delete.q
+++ b/ql/src/test/queries/clientpositive/insert_update_delete.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
index fc0cb10..3530507 100644
--- a/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
index 71e0e73..5f8b8b5 100644
--- a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table ivdp(i int,
                  de decimal(5,2),

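A hedged sketch of the same idea for dynamic partitioning, reusing the
acid_dynamic table from the earlier hunk (the values are illustrative): with
nonstrict dynamic-partition mode set, a VALUES insert into a bucketed,
partitioned ACID table needs no further flags:

  set hive.exec.dynamic.partition.mode=nonstrict;
  insert into table acid_dynamic partition (ds)
  values (1, 'bob', '2015-11-24');
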
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
index d0e7b0f..3b9e98b 100644
--- a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivnp(ti tinyint,
                  si smallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table.q b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
index 8fef549..63a9263 100644
--- a/ql/src/test/queries/clientpositive/insert_values_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivot(
     ctinyint TINYINT,

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
index c8223f7..e78b92e 100644
--- a/ql/src/test/queries/clientpositive/insert_values_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivp(ti tinyint,
                  si smallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
index 4e4c39e..07737c0 100644
--- a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
index 50f9361..1fe911f 100644
--- a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
+++ b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
@@ -15,8 +15,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
 insert into table bucketinput values ("firstinsert1");
 insert into table bucketinput values ("firstinsert2");
 insert into table bucketinput values ("firstinsert3");
-set hive.enforce.bucketing = true; 
-set hive.enforce.sorting=true;
+
+
 insert overwrite table bucketoutput1 select * from bucketinput where data like 'first%'; 
 CREATE TABLE temp1
 (

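For the bucketed-output case above, a sketch of what the test now relies on
(table names are illustrative): INSERT OVERWRITE into a table declared with
CLUSTERED BY yields the declared bucket files with no session flag:

  create table bucketdemo_in (data string);
  create table bucketdemo_out (data string) clustered by (data) into 4 buckets;
  insert overwrite table bucketdemo_out
  select data from bucketdemo_in where data like 'first%';
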
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/join_nullsafe.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join_nullsafe.q b/ql/src/test/queries/clientpositive/join_nullsafe.q
index d6eda77..e96cc71 100644
--- a/ql/src/test/queries/clientpositive/join_nullsafe.q
+++ b/ql/src/test/queries/clientpositive/join_nullsafe.q
@@ -35,8 +35,8 @@ CREATE TABLE smb_input(key int, value int);
 LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
 LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+
 
 -- smbs
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

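The SMB tests drop both flags because sorted output is now guaranteed by the
DDL alone. A minimal sketch, assuming the smb_input table loaded above:

  create table smb_demo(key int, value int)
    clustered by (key) sorted by (key) into 2 buckets;
  insert overwrite table smb_demo select key, value from smb_input;
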
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/load_dyn_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part2.q b/ql/src/test/queries/clientpositive/load_dyn_part2.q
index e804971..ba9e7a8 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part2.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part2.q
@@ -8,7 +8,7 @@ create table if not exists nzhang_part_bucket (key string, value string)
 describe extended nzhang_part_bucket;
 
 set hive.merge.mapfiles=false;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition=true;
 
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 95f3d01..6cd3929 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -27,8 +27,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/orc_empty_files.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_empty_files.q b/ql/src/test/queries/clientpositive/orc_empty_files.q
index d3cbc5a..dd0e81a 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_files.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_files.q
@@ -4,7 +4,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
 
-set hive.enforce.bucketing=true;
+
 set hive.exec.reducers.max = 1;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
index f4d4d73..886f906 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/quotedid_smb.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/quotedid_smb.q b/ql/src/test/queries/clientpositive/quotedid_smb.q
index 38d1b99..25d1f0e 100644
--- a/ql/src/test/queries/clientpositive/quotedid_smb.q
+++ b/ql/src/test/queries/clientpositive/quotedid_smb.q
@@ -2,8 +2,8 @@
 set hive.support.quoted.identifiers=column;
 
 
-set hive.enforce.bucketing = true;  
-set hive.enforce.sorting = true;  
+
+
 create table src_b(`x+1` string, `!@#$%^&*()_q` string)  
 clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
 ;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/reduce_deduplicate.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/reduce_deduplicate.q b/ql/src/test/queries/clientpositive/reduce_deduplicate.q
index 2e26adc..5386590 100644
--- a/ql/src/test/queries/clientpositive/reduce_deduplicate.q
+++ b/ql/src/test/queries/clientpositive/reduce_deduplicate.q
@@ -1,5 +1,5 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
+
 set hive.exec.reducers.max = 1;
 set hive.exec.script.trust = true;
 set hive.optimize.reducededuplication = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sample10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample10.q b/ql/src/test/queries/clientpositive/sample10.q
index d9fe744..3aec841 100644
--- a/ql/src/test/queries/clientpositive/sample10.q
+++ b/ql/src/test/queries/clientpositive/sample10.q
@@ -2,7 +2,7 @@ set hive.exec.submitviachild=true;
 set hive.exec.submit.local.task.via.child=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=true;
+
 set hive.exec.reducers.max=4;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 set hive.default.fileformat=RCFILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin9.q b/ql/src/test/queries/clientpositive/smb_mapjoin9.q
index b959022..06820a0 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin9.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin9.q
@@ -28,8 +28,8 @@ FROM hive_test_smb_bucket1 a JOIN
 hive_test_smb_bucket2 b
 ON a.key = b.key WHERE a.ds = '2010-10-15' and b.ds='2010-10-15' and  b.key IS NOT NULL;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.exec.reducers.max = 1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
index 9300638..97e3b08 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 
@@ -17,8 +17,8 @@ FROM src
 INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *;
 
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- Create a bucketed table
 CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS;

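Worth noting in smb_mapjoin_11.q (and smb_mapjoin_12.q below): the patch also
deletes the set ...=false lines, so the old trick of switching enforcement off
before populating a bucketed table is no longer expressible; every insert into
a bucketed target is enforced. An illustrative sketch against the test_table3
declared in the test:

  -- formerly preceded by: set hive.enforce.bucketing=false;
  -- this insert now always produces 16 bucket files for the partition
  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
  SELECT key, value FROM test_table1 WHERE ds = '1';
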
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
index 9f9748c..7a506ad 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 
@@ -19,8 +19,8 @@ INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *;
 
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- Create a bucketed table
 CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
index 056bccd..ca15fc3 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
index f03f92e..b8b939c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
index 1e77a60..4a16c0d 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
index 3a3a872..bff11dd 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
index 238f7e0..276bfcc 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
index 02e3fb5..a89bc1c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
index ca48f61..4695e5a 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_20.q b/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
index f70e7d5..aa1e9fa 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
index 09edfc1..08b13aa 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
index 676f46a..2f1a6b6 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
index 683341b..498d337 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.max.dynamic.partitions.pernode=1000000;
 set hive.exec.max.dynamic.partitions=1000000;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
index 2884a11..b50c494 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
index ca1c749..d192036 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
index 4b4e167..dc6a35f 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
@@ -1,6 +1,6 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
index 8002ec5..efa0178 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
 create table table_desc2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;

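The sort_merge_join_desc tests only ever set hive.enforce.sorting; after the
patch a descending sort order declared in the DDL is honored on insert
automatically. A sketch using the table just created:

  insert overwrite table table_desc1 select key, value from src;
  -- rows within the single bucket file come out sorted by key DESC
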
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
index bd0cdb2..2b787b8 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
index a109878..bdc550c 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
index 0bc5071..89a26fd 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
index 3505db0..9f32f53 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
index 35b0535..e733538 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
index 65dc7f1..fe523be 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
index 2ec0849..4c0975d 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
@@ -3,7 +3,7 @@ drop table table_desc2;
 drop table table_desc3;
 drop table table_desc4;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key)
 sorted by (key DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/stats10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats10.q b/ql/src/test/queries/clientpositive/stats10.q
index a3f375e..2ad6a4f 100644
--- a/ql/src/test/queries/clientpositive/stats10.q
+++ b/ql/src/test/queries/clientpositive/stats10.q
@@ -1,6 +1,6 @@
 set datanucleus.cache.collections=false;
 set hive.stats.autogather=true;
-set hive.enforce.bucketing = true;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
index a06bb82..631b78d 100644
--- a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/tez_fsstat.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_fsstat.q b/ql/src/test/queries/clientpositive/tez_fsstat.q
index 90201b6..35d1f58 100644
--- a/ql/src/test/queries/clientpositive/tez_fsstat.q
+++ b/ql/src/test/queries/clientpositive/tez_fsstat.q
@@ -7,8 +7,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE t1 partitio
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE t1 partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE t1 partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 set hive.stats.dbclass=fs;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/tez_smb_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_1.q b/ql/src/test/queries/clientpositive/tez_smb_1.q
index 03a1fea..b8147b8 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_1.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_1.q
@@ -17,8 +17,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/tez_smb_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_empty.q b/ql/src/test/queries/clientpositive/tez_smb_empty.q
index 2427377..67acbcc 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_empty.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_empty.q
@@ -19,8 +19,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/tez_smb_main.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_main.q b/ql/src/test/queries/clientpositive/tez_smb_main.q
index dff5112..44bb1d8 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_main.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_main.q
@@ -22,8 +22,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/transform_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/transform_acid.q b/ql/src/test/queries/clientpositive/transform_acid.q
index 94782f1..cf8bd24 100644
--- a/ql/src/test/queries/clientpositive/transform_acid.q
+++ b/ql/src/test/queries/clientpositive/transform_acid.q
@@ -1,7 +1,7 @@
 set hive.entity.capture.transform=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- EXCLUDE_OS_WINDOWS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/truncate_column_buckets.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/truncate_column_buckets.q b/ql/src/test/queries/clientpositive/truncate_column_buckets.q
index a2ce215..4375843 100644
--- a/ql/src/test/queries/clientpositive/truncate_column_buckets.q
+++ b/ql/src/test/queries/clientpositive/truncate_column_buckets.q
@@ -2,7 +2,7 @@
 
 CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
 
-set hive.enforce.bucketing=true;
+
 
 INSERT OVERWRITE TABLE test_tab SELECT * FROM src;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
index eb8e5a1..7534999 100644
--- a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
+++ b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
index 3c01825..d611925 100644
--- a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

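The remaining update/delete tests follow the same trimming. A hedged sketch of
the post-patch DML flow, reusing the illustrative acid_demo table from the
first note:

  update acid_demo set b = 'two again' where a = 2;
  delete from acid_demo where a = 1;
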
http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_all_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_partitioned.q b/ql/src/test/queries/clientpositive/update_all_partitioned.q
index e191d0a..d7aa24f 100644
--- a/ql/src/test/queries/clientpositive/update_all_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_all_types.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_types.q b/ql/src/test/queries/clientpositive/update_all_types.q
index 0229845..543fd09 100644
--- a/ql/src/test/queries/clientpositive/update_all_types.q
+++ b/ql/src/test/queries/clientpositive/update_all_types.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_orig_table.q b/ql/src/test/queries/clientpositive/update_orig_table.q
index 416c841..f68b82d 100644
--- a/ql/src/test/queries/clientpositive/update_orig_table.q
+++ b/ql/src/test/queries/clientpositive/update_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table;
 dfs -copyFromLocal ../../data/files/alltypesorc ${system:test.tmp.dir}/update_orig_table/00000_0; 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_tmp_table.q b/ql/src/test/queries/clientpositive/update_tmp_table.q
index a896ac7..12309e5 100644
--- a/ql/src/test/queries/clientpositive/update_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/update_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_two_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_two_cols.q b/ql/src/test/queries/clientpositive/update_two_cols.q
index b1972e5..8b1719b 100644
--- a/ql/src/test/queries/clientpositive/update_two_cols.q
+++ b/ql/src/test/queries/clientpositive/update_two_cols.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_where_no_match.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_no_match.q b/ql/src/test/queries/clientpositive/update_where_no_match.q
index d578862..8e6faaf 100644
--- a/ql/src/test/queries/clientpositive/update_where_no_match.q
+++ b/ql/src/test/queries/clientpositive/update_where_no_match.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
index 06c688f..b7a97c2 100644
--- a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/update_where_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_partitioned.q b/ql/src/test/queries/clientpositive/update_where_partitioned.q
index 157712f..ba35e35 100644
--- a/ql/src/test/queries/clientpositive/update_where_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
index 32be5ee..fa7fff8 100644
--- a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
@@ -1,6 +1,6 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/5562fae7/ql/src/test/queries/clientpositive/vector_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bucket.q b/ql/src/test/queries/clientpositive/vector_bucket.q
index 9360ce0..74cbefc 100644
--- a/ql/src/test/queries/clientpositive/vector_bucket.q
+++ b/ql/src/test/queries/clientpositive/vector_bucket.q
@@ -1,7 +1,7 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.support.concurrency=true;
-set hive.enforce.bucketing=true;
+
 
 CREATE TABLE non_orc_table(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS sequencefile;