Posted to commits@hive.apache.org by se...@apache.org on 2018/06/18 22:03:34 UTC

[43/67] [abbrv] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ec256c2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ec256c2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ec256c2

Branch: refs/heads/master-txnstats
Commit: 4ec256c23d5986385f0ad4ff0ae43b72822b6756
Parents: ebd2c5f
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Mon Jun 18 10:35:12 2018 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Mon Jun 18 10:35:12 2018 +0200

----------------------------------------------------------------------
 .../src/test/queries/negative/cascade_dbdrop.q  |   1 -
 .../queries/negative/cascade_dbdrop_hadoop20.q  |  29 --
 .../control/AbstractCoreBlobstoreCliDriver.java |   7 -
 .../hive/cli/control/CoreAccumuloCliDriver.java |   5 -
 .../hadoop/hive/cli/control/CoreCliDriver.java  |   8 -
 .../hive/cli/control/CoreCompareCliDriver.java  |   7 +-
 .../hive/cli/control/CoreHBaseCliDriver.java    |   5 -
 .../cli/control/CoreHBaseNegativeCliDriver.java |   5 -
 .../hive/cli/control/CoreNegativeCliDriver.java |   7 +-
 .../hive/cli/control/CorePerfCliDriver.java     |  10 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    | 110 +------
 ql/src/test/queries/clientnegative/archive1.q   |   1 -
 ql/src/test/queries/clientnegative/archive2.q   |   1 -
 ql/src/test/queries/clientnegative/archive3.q   |   1 -
 ql/src/test/queries/clientnegative/archive4.q   |   1 -
 .../queries/clientnegative/archive_corrupt.q    |   1 -
 .../queries/clientnegative/archive_insert1.q    |   1 -
 .../queries/clientnegative/archive_insert2.q    |   1 -
 .../queries/clientnegative/archive_insert3.q    |   1 -
 .../queries/clientnegative/archive_insert4.q    |   1 -
 .../queries/clientnegative/archive_multi1.q     |   1 -
 .../queries/clientnegative/archive_multi2.q     |   1 -
 .../queries/clientnegative/archive_multi3.q     |   1 -
 .../queries/clientnegative/archive_multi4.q     |   1 -
 .../queries/clientnegative/archive_multi5.q     |   1 -
 .../queries/clientnegative/archive_multi6.q     |   1 -
 .../queries/clientnegative/archive_multi7.q     |   1 -
 .../queries/clientnegative/archive_partspec1.q  |   1 -
 .../queries/clientnegative/archive_partspec2.q  |   1 -
 .../queries/clientnegative/archive_partspec3.q  |   1 -
 .../queries/clientnegative/archive_partspec4.q  |   1 -
 .../queries/clientnegative/archive_partspec5.q  |   1 -
 ql/src/test/queries/clientnegative/autolocal1.q |  16 --
 .../clientnegative/mapreduce_stack_trace.q      |   1 -
 .../mapreduce_stack_trace_turnoff.q             |   1 -
 .../alter_numbuckets_partitioned_table_h23.q    |   1 -
 .../test/queries/clientpositive/archive_multi.q |   1 -
 .../test/queries/clientpositive/auto_join14.q   |   1 -
 .../clientpositive/auto_join14_hadoop20.q       |  20 --
 .../cbo_rp_udaf_percentile_approx_23.q          |   1 -
 ql/src/test/queries/clientpositive/combine2.q   |   1 -
 .../queries/clientpositive/combine2_hadoop20.q  |  50 ----
 ql/src/test/queries/clientpositive/ctas.q       |   1 -
 .../queries/clientpositive/groupby_sort_1.q     | 283 ------------------
 .../queries/clientpositive/groupby_sort_1_23.q  |   1 -
 .../clientpositive/groupby_sort_skew_1.q        | 285 -------------------
 .../clientpositive/groupby_sort_skew_1_23.q     |   1 -
 .../infer_bucket_sort_list_bucket.q             |   1 -
 ql/src/test/queries/clientpositive/input12.q    |   1 -
 .../queries/clientpositive/input12_hadoop20.q   |  24 --
 ql/src/test/queries/clientpositive/input39.q    |   1 -
 .../queries/clientpositive/input39_hadoop20.q   |  31 --
 ql/src/test/queries/clientpositive/join14.q     |   1 -
 .../queries/clientpositive/join14_hadoop20.q    |  17 --
 .../test/queries/clientpositive/lb_fs_stats.q   |   1 -
 .../queries/clientpositive/list_bucket_dml_1.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_11.q |   1 -
 .../queries/clientpositive/list_bucket_dml_12.q |   1 -
 .../queries/clientpositive/list_bucket_dml_13.q |   1 -
 .../queries/clientpositive/list_bucket_dml_14.q |   1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_3.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_7.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |   1 -
 .../list_bucket_query_multiskew_1.q             |   1 -
 .../list_bucket_query_multiskew_2.q             |   1 -
 .../list_bucket_query_multiskew_3.q             |   1 -
 .../list_bucket_query_oneskew_1.q               |   1 -
 .../list_bucket_query_oneskew_2.q               |   1 -
 .../list_bucket_query_oneskew_3.q               |   1 -
 .../test/queries/clientpositive/loadpart_err.q  |  21 --
 .../test/queries/clientpositive/recursive_dir.q |   1 -
 ql/src/test/queries/clientpositive/sample10.q   |   1 -
 .../clientpositive/sample_islocalmode_hook.q    |   1 -
 .../sample_islocalmode_hook_hadoop20.q          |  42 ---
 .../sample_islocalmode_hook_use_metadata.q      |   1 -
 .../clientpositive/skewjoin_union_remove_1.q    |   1 -
 .../clientpositive/skewjoin_union_remove_2.q    |   1 -
 .../queries/clientpositive/stats_list_bucket.q  |   1 -
 .../truncate_column_list_bucket.q               |   1 -
 .../test/queries/clientpositive/uber_reduce.q   |   1 -
 .../clientpositive/udaf_percentile_approx_20.q  |  87 ------
 .../clientpositive/udaf_percentile_approx_23.q  |   1 -
 .../queries/clientpositive/union_remove_1.q     |   1 -
 .../queries/clientpositive/union_remove_10.q    |   1 -
 .../queries/clientpositive/union_remove_11.q    |   1 -
 .../queries/clientpositive/union_remove_12.q    |   1 -
 .../queries/clientpositive/union_remove_13.q    |   1 -
 .../queries/clientpositive/union_remove_14.q    |   1 -
 .../queries/clientpositive/union_remove_15.q    |   1 -
 .../queries/clientpositive/union_remove_16.q    |   1 -
 .../queries/clientpositive/union_remove_17.q    |   1 -
 .../queries/clientpositive/union_remove_18.q    |   1 -
 .../queries/clientpositive/union_remove_19.q    |   1 -
 .../queries/clientpositive/union_remove_2.q     |   1 -
 .../queries/clientpositive/union_remove_20.q    |   1 -
 .../queries/clientpositive/union_remove_21.q    |   1 -
 .../queries/clientpositive/union_remove_22.q    |   1 -
 .../queries/clientpositive/union_remove_23.q    |   1 -
 .../queries/clientpositive/union_remove_24.q    |   1 -
 .../queries/clientpositive/union_remove_25.q    |   1 -
 .../queries/clientpositive/union_remove_3.q     |   1 -
 .../queries/clientpositive/union_remove_4.q     |   1 -
 .../queries/clientpositive/union_remove_5.q     |   1 -
 .../queries/clientpositive/union_remove_7.q     |   1 -
 .../queries/clientpositive/union_remove_8.q     |   1 -
 .../queries/clientpositive/union_remove_9.q     |   1 -
 111 files changed, 15 insertions(+), 1144 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
index 7f9df5e..266aa06 100644
--- a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
+++ b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
@@ -1,7 +1,6 @@
 
 CREATE DATABASE hbaseDB;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
 -- Hadoop 0.23 changes the behavior FsShell on Exit Codes
 -- In Hadoop 0.20
 -- Exit Code == 0 on success

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q b/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
deleted file mode 100644
index 8fa8c8a..0000000
--- a/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
+++ /dev/null
@@ -1,29 +0,0 @@
-
-CREATE DATABASE hbaseDB;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
--- Hadoop 0.23 changes the behavior FsShell on Exit Codes
--- In Hadoop 0.20
--- Exit Code == 0 on success
--- Exit code < 0 on any failure
--- In Hadoop 0.23
--- Exit Code == 0 on success
--- Exit Code < 0 on syntax/usage error
--- Exit Code > 0 operation failed
-
-CREATE TABLE hbaseDB.hbase_table_0(key int, value string)
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
-
-dfs -ls target/tmp/hbase/data/default/hbase_table_0;
-
-DROP DATABASE IF EXISTS hbaseDB CASCADE;
-
-dfs -ls target/tmp/hbase/data/hbase/default/hbase_table_0;
-
-
-
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
index dd80424..764a4d8 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.cli.control;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.base.Strings;
-
 import java.io.File;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
@@ -133,11 +131,6 @@ public abstract class AbstractCoreBlobstoreCliDriver extends CliAdapter {
       System.err.println("Begin query: " + fname);
 
       qt.addFile(fpath);
-
-      if (qt.shouldBeSkipped(fname)) {
-        System.err.println("Test " + fname + " skipped");
-        return;
-      }
       qt.cliInit(new File(fpath), false);
       int ecode = qt.executeClient(fname);
       if ((ecode == 0) ^ expectSuccess) {

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
index 9c9ba18..648a05d 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
@@ -90,11 +90,6 @@ public class CoreAccumuloCliDriver extends CliAdapter {
 
       qt.addFile(fpath);
 
-      if (qt.shouldBeSkipped(fname)) {
-        System.err.println("Test " + fname + " skipped");
-        return;
-      }
-
       qt.cliInit(new File(fpath), false);
       qt.clearTestSideEffects();
       int ecode = qt.executeClient(fname);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
index a7ec4f3..e588592 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
@@ -165,14 +165,6 @@ public class CoreCliDriver extends CliAdapter {
       System.err.println("Begin query: " + fname);
 
       qt.addFile(fpath);
-
-      if (qt.shouldBeSkipped(fname)) {
-        LOG.info("Test " + fname + " skipped");
-        System.err.println("Test " + fname + " skipped");
-        skipped = true;
-        return;
-      }
-
       qt.cliInit(new File(fpath), false);
       int ecode = qt.executeClient(fname);
       if (ecode != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
index c36d231..1ad76f9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
@@ -25,7 +25,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
@@ -33,6 +32,8 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+
+import com.google.common.base.Strings;
 public class CoreCompareCliDriver extends CliAdapter{
 
   private static QTestUtil qt;
@@ -128,10 +129,6 @@ public class CoreCompareCliDriver extends CliAdapter{
         qt.addFile(new File(queryDirectory, versionFile), true);
       }
 
-      if (qt.shouldBeSkipped(fname)) {
-        return;
-      }
-
       int ecode = 0;
       
       qt.cliInit(new File(fpath), false);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
index b40b8d7..fc5f75d 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
@@ -110,11 +110,6 @@ public class CoreHBaseCliDriver extends CliAdapter {
 
       qt.addFile(fpath);
 
-      if (qt.shouldBeSkipped(fname)) {
-        System.err.println("Test " + fname + " skipped");
-        return;
-      }
-
       qt.cliInit(new File(fpath), false);
 
       int ecode = qt.executeClient(fname);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
index e828dc7..8fb88d0 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
@@ -92,11 +92,6 @@ public class CoreHBaseNegativeCliDriver extends CliAdapter {
 
       qt.addFile(fpath);
 
-      if (qt.shouldBeSkipped(fname)) {
-        System.err.println("Test " + fname + " skipped");
-        return;
-      }
-
       qt.cliInit(new File(fpath));
       qt.clearTestSideEffects();
       int ecode = qt.executeClient(fname);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
index 176ac14..3be6f66 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
@@ -30,6 +29,8 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 
+import com.google.common.base.Strings;
+
 public class CoreNegativeCliDriver extends CliAdapter{
 
   private QTestUtil qt;
@@ -116,10 +117,6 @@ public class CoreNegativeCliDriver extends CliAdapter{
 
       qt.addFile(fpath);
 
-      if (qt.shouldBeSkipped(fname)) {
-        System.err.println("Test " + fname + " skipped");
-        return;
-      }
 
       qt.cliInit(new File(fpath), false);
       int ecode = qt.executeClient(fname);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
index 3ae691f..af91866 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
@@ -25,13 +25,14 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 
-import com.google.common.base.Strings;
+import org.apache.hadoop.hive.ql.MetaStoreDumpUtility;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.apache.hadoop.hive.ql.MetaStoreDumpUtility;
 import org.junit.After;
 import org.junit.AfterClass;
+
+import com.google.common.base.Strings;
 /**
  This is the TestPerformance Cli Driver for integrating performance regression tests
  as part of the Hive Unit tests.
@@ -125,11 +126,6 @@ public class CorePerfCliDriver extends CliAdapter{
       System.err.println("Begin query: " + fname);
 
       qt.addFile(fpath);
-
-      if (qt.shouldBeSkipped(fname)) {
-        return;
-      }
-
       qt.cliInit(new File(fpath), false);
 
       int ecode = qt.executeClient(fname);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index f19a3ad..2106fec 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -54,7 +54,6 @@ import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import junit.framework.TestSuite;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.output.ByteArrayOutputStream;
@@ -80,6 +79,9 @@ import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
+import org.apache.hadoop.hive.ql.dataset.Dataset;
+import org.apache.hadoop.hive.ql.dataset.DatasetCollection;
+import org.apache.hadoop.hive.ql.dataset.DatasetParser;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -105,9 +107,6 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.processors.HiveCommand;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.dataset.DatasetCollection;
-import org.apache.hadoop.hive.ql.dataset.DatasetParser;
-import org.apache.hadoop.hive.ql.dataset.Dataset;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.HadoopShims.HdfsErasureCodingShim;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -119,14 +118,15 @@ import org.apache.tools.ant.BuildException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooKeeper;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
 
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import junit.framework.TestSuite;
 
 /**
  * QTestUtil.
@@ -164,7 +164,6 @@ public class QTestUtil {
   protected String overrideResultsDir;
   protected final String logDir;
   private final TreeMap<String, String> qMap;
-  private final Set<String> qSkipSet;
   private final Set<String> qSortSet;
   private final Set<String> qSortQuerySet;
   private final Set<String> qHashQuerySet;
@@ -173,7 +172,6 @@ public class QTestUtil {
   private final Set<String> qMaskStatsQuerySet;
   private final Set<String> qMaskDataSizeQuerySet;
   private final Set<String> qMaskLineageQuerySet;
-  private final Set<String> qJavaVersionSpecificOutput;
   private static final String SORT_SUFFIX = ".sorted";
   private static Set<String> srcTables;
   private final Set<String> srcUDFs;
@@ -594,7 +592,6 @@ public class QTestUtil {
     conf = queryState.getConf();
     this.hadoopVer = getHadoopMainVersion(hadoopVer);
     qMap = new TreeMap<String, String>();
-    qSkipSet = new HashSet<String>();
     qSortSet = new HashSet<String>();
     qSortQuerySet = new HashSet<String>();
     qHashQuerySet = new HashSet<String>();
@@ -603,7 +600,6 @@ public class QTestUtil {
     qMaskStatsQuerySet = new HashSet<String>();
     qMaskDataSizeQuerySet = new HashSet<String>();
     qMaskLineageQuerySet = new HashSet<String>();
-    qJavaVersionSpecificOutput = new HashSet<String>();
     this.clusterType = clusterType;
 
     HadoopShims shims = ShimLoader.getHadoopShims();
@@ -839,14 +835,6 @@ public class QTestUtil {
       return;
     }
 
-    if(checkHadoopVersionExclude(qf.getName(), query)) {
-      qSkipSet.add(qf.getName());
-    }
-
-    if (checkNeedJavaSpecificOutput(qf.getName(), query)) {
-      qJavaVersionSpecificOutput.add(qf.getName());
-    }
-
     if (matches(SORT_BEFORE_DIFF, query)) {
       qSortSet.add(qf.getName());
     } else if (matches(SORT_QUERY_RESULTS, query)) {
@@ -888,79 +876,6 @@ public class QTestUtil {
     return false;
   }
 
-  private boolean checkHadoopVersionExclude(String fileName, String query){
-
-    // Look for a hint to not run a test on some Hadoop versions
-    Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
-
-    boolean excludeQuery = false;
-    boolean includeQuery = false;
-    Set<String> versionSet = new HashSet<String>();
-    String hadoopVer = ShimLoader.getMajorVersion();
-
-    Matcher matcher = pattern.matcher(query);
-
-    // Each qfile may include at most one INCLUDE or EXCLUDE directive.
-    //
-    // If a qfile contains an INCLUDE directive, and hadoopVer does
-    // not appear in the list of versions to include, then the qfile
-    // is skipped.
-    //
-    // If a qfile contains an EXCLUDE directive, and hadoopVer is
-    // listed in the list of versions to EXCLUDE, then the qfile is
-    // skipped.
-    //
-    // Otherwise, the qfile is included.
-
-    if (matcher.find()) {
-
-      String prefix = matcher.group(1);
-      if ("EX".equals(prefix)) {
-        excludeQuery = true;
-      } else {
-        includeQuery = true;
-      }
-
-      String versions = matcher.group(2);
-      for (String s : versions.split("\\,")) {
-        s = s.trim();
-        versionSet.add(s);
-      }
-    }
-
-    if (matcher.find()) {
-      //2nd match is not supposed to be there
-      String message = "QTestUtil: qfile " + fileName
-        + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
-      throw new UnsupportedOperationException(message);
-    }
-
-    if (excludeQuery && versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + fileName
-        + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
-      return true;
-    } else if (includeQuery && !versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + fileName
-        + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
-      return true;
-    }
-    return false;
-  }
-
-  private boolean checkNeedJavaSpecificOutput(String fileName, String query) {
-    Pattern pattern = Pattern.compile("-- JAVA_VERSION_SPECIFIC_OUTPUT");
-    Matcher matcher = pattern.matcher(query);
-    if (matcher.find()) {
-      System.out.println("Test is flagged to generate Java version specific " +
-          "output. Since we are using Java version " + javaVersion +
-          ", we will generated Java " + javaVersion + " specific " +
-          "output file for query file " + fileName);
-      return true;
-    }
-
-    return false;
-  }
-
   /**
    * Get formatted Java version to include minor version, but
    * exclude patch level.
@@ -1613,17 +1528,8 @@ public class QTestUtil {
     return commands;
   }
 
-  public boolean shouldBeSkipped(String tname) {
-    return qSkipSet.contains(tname);
-  }
-
   private String getOutFileExtension(String fname) {
-    String outFileExtension = ".out";
-    if (qJavaVersionSpecificOutput.contains(fname)) {
-      outFileExtension = ".java" + javaVersion + ".out";
-    }
-
-    return outFileExtension;
+    return ".out";
   }
 
   public void convertSequenceFileToTextFile() throws Exception {
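
For reference, the retired mechanism paired a directive comment in each .q file (removed in the hunks below) with the checkHadoopVersionExclude() logic removed from QTestUtil above. A minimal standalone sketch of that matching logic follows; the class name, method name, and main() demo are illustrative only, not part of Hive:

import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionDirectiveSketch {
  // Same pattern the removed checkHadoopVersionExclude() used.
  private static final Pattern DIRECTIVE =
      Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");

  /** Returns true if the qfile should be skipped for the given Hadoop major version. */
  static boolean shouldSkip(String query, String hadoopVer) {
    Matcher matcher = DIRECTIVE.matcher(query);
    if (!matcher.find()) {
      return false;                       // no directive: qfile always runs
    }
    boolean exclude = "EX".equals(matcher.group(1));
    Set<String> versions = new HashSet<>();
    for (String s : matcher.group(2).split(",")) {
      versions.add(s.trim());
    }
    if (matcher.find()) {                 // at most one directive per qfile
      throw new UnsupportedOperationException(
          "qfile contains more than one (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS directive");
    }
    // EXCLUDE skips the listed versions; INCLUDE skips every version not listed.
    return exclude ? versions.contains(hadoopVer) : !versions.contains(hadoopVer);
  }

  public static void main(String[] args) {
    String q = "-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)\nSELECT 1;";
    System.out.println(shouldSkip(q, "0.20"));   // true: 0.20 is excluded
    System.out.println(shouldSkip(q, "0.23"));   // false: 0.23 still runs
  }
}

With every supported Hadoop line now past 0.23, both branches had become dead code, which is why the commit drops the directive lines, the _hadoop20 qfile variants, and shouldBeSkipped() together.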

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive1.q b/ql/src/test/queries/clientnegative/archive1.q
index a66b5e2..6c11580 100644
--- a/ql/src/test/queries/clientnegative/archive1.q
+++ b/ql/src/test/queries/clientnegative/archive1.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive2.q b/ql/src/test/queries/clientnegative/archive2.q
index d879675..4bd0ef9 100644
--- a/ql/src/test/queries/clientnegative/archive2.q
+++ b/ql/src/test/queries/clientnegative/archive2.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to unarchive a non-archived partition
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 drop table tstsrcpart;
 create table tstsrcpart like srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive3.q b/ql/src/test/queries/clientnegative/archive3.q
index c09243f..fb07a60 100644
--- a/ql/src/test/queries/clientnegative/archive3.q
+++ b/ql/src/test/queries/clientnegative/archive3.q
@@ -1,6 +1,5 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests archiving a table
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 ALTER TABLE srcpart ARCHIVE;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive4.q b/ql/src/test/queries/clientnegative/archive4.q
index befdc5f..8921f6d 100644
--- a/ql/src/test/queries/clientnegative/archive4.q
+++ b/ql/src/test/queries/clientnegative/archive4.q
@@ -1,6 +1,5 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests archiving multiple partitions
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 ALTER TABLE srcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12') PARTITION (ds='2008-04-08', hr='11');

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_corrupt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_corrupt.q b/ql/src/test/queries/clientnegative/archive_corrupt.q
index ab182d7..e5bda3f 100644
--- a/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -8,7 +8,6 @@ drop table tstsrcpart;
 
 create table tstsrcpart like srcpart;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
 -- The version of GzipCodec that is provided in Hadoop 0.20 silently ignores
 -- file format errors. However, versions of Hadoop that include
 -- HADOOP-6835 (e.g. 0.23 and 1.x) cause a Wrong File Format exception

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert1.q b/ql/src/test/queries/clientnegative/archive_insert1.q
index 3663634..0b17464 100644
--- a/ql/src/test/queries/clientnegative/archive_insert1.q
+++ b/ql/src/test/queries/clientnegative/archive_insert1.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert2.q b/ql/src/test/queries/clientnegative/archive_insert2.q
index c4d99fe..eeb3e62 100644
--- a/ql/src/test/queries/clientnegative/archive_insert2.q
+++ b/ql/src/test/queries/clientnegative/archive_insert2.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert3.q b/ql/src/test/queries/clientnegative/archive_insert3.q
index 7a9f4fa..94ca892 100644
--- a/ql/src/test/queries/clientnegative/archive_insert3.q
+++ b/ql/src/test/queries/clientnegative/archive_insert3.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to create partition inside of archived directory.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert4.q b/ql/src/test/queries/clientnegative/archive_insert4.q
index 52428f8..5d3ec6f 100644
--- a/ql/src/test/queries/clientnegative/archive_insert4.q
+++ b/ql/src/test/queries/clientnegative/archive_insert4.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to (possible) dynamic insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi1.q b/ql/src/test/queries/clientnegative/archive_multi1.q
index bf60d5d..91366c2 100644
--- a/ql/src/test/queries/clientnegative/archive_multi1.q
+++ b/ql/src/test/queries/clientnegative/archive_multi1.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi2.q b/ql/src/test/queries/clientnegative/archive_multi2.q
index 92eff2f..9f342c9 100644
--- a/ql/src/test/queries/clientnegative/archive_multi2.q
+++ b/ql/src/test/queries/clientnegative/archive_multi2.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to unarchive a non-archived partition group
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 drop table tstsrcpart;
 create table tstsrcpart like srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi3.q b/ql/src/test/queries/clientnegative/archive_multi3.q
index 29e4a00..c2e86ff 100644
--- a/ql/src/test/queries/clientnegative/archive_multi3.q
+++ b/ql/src/test/queries/clientnegative/archive_multi3.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive outer partition group containing other partition inside.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi4.q b/ql/src/test/queries/clientnegative/archive_multi4.q
index 98d766a..d5eb315 100644
--- a/ql/src/test/queries/clientnegative/archive_multi4.q
+++ b/ql/src/test/queries/clientnegative/archive_multi4.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive inner partition contained in archived partition group.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi5.q b/ql/src/test/queries/clientnegative/archive_multi5.q
index 1eeab17..73a684f 100644
--- a/ql/src/test/queries/clientnegative/archive_multi5.q
+++ b/ql/src/test/queries/clientnegative/archive_multi5.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to unarchive outer partition group containing other partition inside.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi6.q b/ql/src/test/queries/clientnegative/archive_multi6.q
index d335db9..5df07a3 100644
--- a/ql/src/test/queries/clientnegative/archive_multi6.q
+++ b/ql/src/test/queries/clientnegative/archive_multi6.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to unarchive inner partition contained in archived partition group.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi7.q b/ql/src/test/queries/clientnegative/archive_multi7.q
index 4c3f06e..65e1025 100644
--- a/ql/src/test/queries/clientnegative/archive_multi7.q
+++ b/ql/src/test/queries/clientnegative/archive_multi7.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition group with custom locations.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE tstsrcpart LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec1.q b/ql/src/test/queries/clientnegative/archive_partspec1.q
index ead4268..9dd3e23 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec1.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec1.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec2.q b/ql/src/test/queries/clientnegative/archive_partspec2.q
index ff4581f..0e55217 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec2.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec2.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec3.q b/ql/src/test/queries/clientnegative/archive_partspec3.q
index ff29486..94f984c 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec3.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec3.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec4.q b/ql/src/test/queries/clientnegative/archive_partspec4.q
index f27496f..48c5ec6 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec4.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec4.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived LIKE srcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec5.q b/ql/src/test/queries/clientnegative/archive_partspec5.q
index d5df078..a8441be 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec5.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec5.q
@@ -1,7 +1,6 @@
 --! qt:dataset:srcpart
 set hive.archive.enabled = true;
 -- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 CREATE TABLE srcpart_archived (key string, value string) partitioned by (ds string, hr int, min int);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/autolocal1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/autolocal1.q b/ql/src/test/queries/clientnegative/autolocal1.q
deleted file mode 100644
index 51f5bd5..0000000
--- a/ql/src/test/queries/clientnegative/autolocal1.q
+++ /dev/null
@@ -1,16 +0,0 @@
---! qt:dataset:src
-set mapred.job.tracker=abracadabra;
-set hive.exec.mode.local.auto.inputbytes.max=1;
-set hive.exec.mode.local.auto=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
--- hadoop0.23 changes the behavior of JobClient initialization
--- in hadoop0.20, JobClient initialization tries to get JobTracker's address
--- this throws the expected IllegalArgumentException
--- in hadoop0.23, JobClient initialization only initializes cluster
--- and get user group information
--- not attempts to get JobTracker's address
--- no IllegalArgumentException thrown in JobClient Initialization
--- an exception is thrown when JobClient submitJob
-
-SELECT key FROM src; 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q b/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
index 37bb54d..953a7e4 100644
--- a/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
+++ b/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
@@ -7,7 +7,6 @@ set hive.exec.failure.hooks=org.apache.hadoop.hive.ql.hooks.VerifySessionStateSt
 
 FROM src SELECT TRANSFORM(key, value) USING 'script_does_not_exist' AS (key, value);
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- Hadoop 0.23 changes the getTaskDiagnostics behavior
 -- The Error Code of hive failure MapReduce job changes
 -- In Hadoop 0.20

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q b/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
index 946374b..d1b588b 100644
--- a/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
+++ b/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
@@ -7,7 +7,6 @@ set hive.exec.failure.hooks=org.apache.hadoop.hive.ql.hooks.VerifySessionStateSt
 
 FROM src SELECT TRANSFORM(key, value) USING 'script_does_not_exist' AS (key, value);
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- Hadoop 0.23 changes the getTaskDiagnostics behavior
 -- The Error Code of hive failure MapReduce job changes
 -- In Hadoop 0.20

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 163ca8f..d4e1a19 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -1,5 +1,4 @@
 --! qt:dataset:src
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
 
 alter table tst1_n1 clustered by (key) into 8 buckets;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/archive_multi.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_multi.q b/ql/src/test/queries/clientpositive/archive_multi.q
index 60cb4a9..b372ea2 100644
--- a/ql/src/test/queries/clientpositive/archive_multi.q
+++ b/ql/src/test/queries/clientpositive/archive_multi.q
@@ -23,7 +23,6 @@ select key, value from default.srcpart where ds='2008-04-09' and hr='11';
 insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='12')
 select key, value from default.srcpart where ds='2008-04-09' and hr='12';
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
 
 SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
 FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/auto_join14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join14.q b/ql/src/test/queries/clientpositive/auto_join14.q
index 11829cc..1f6e0eb 100644
--- a/ql/src/test/queries/clientpositive/auto_join14.q
+++ b/ql/src/test/queries/clientpositive/auto_join14.q
@@ -4,7 +4,6 @@ set hive.mapred.mode=nonstrict;
 
 set hive.auto.convert.join = true;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
 CREATE TABLE dest1_n83(c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q b/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
deleted file mode 100644
index 0c6b900..0000000
--- a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
+++ /dev/null
@@ -1,20 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
-
-set hive.auto.convert.join = true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-
-CREATE TABLE dest1_n74(c1 INT, c2 STRING) STORED AS TEXTFILE;
-
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-explain
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value;
-
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value;
-
-SELECT sum(hash(dest1_n74.c1,dest1_n74.c2)) FROM dest1_n74;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
index ba198eb..d59bd24 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
@@ -2,7 +2,6 @@ set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;
 set hive.cbo.returnpath.hiveop=true;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- 0.23 changed input order of data in reducer task, which affects result of percentile_approx
 
 CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC)  INTO 4 BUCKETS STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/combine2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine2.q b/ql/src/test/queries/clientpositive/combine2.q
index 5b19bc0..9d33c1a 100644
--- a/ql/src/test/queries/clientpositive/combine2.q
+++ b/ql/src/test/queries/clientpositive/combine2.q
@@ -17,7 +17,6 @@ set hive.merge.smallfiles.avgsize=0;
 
 create table combine2_n0(key string) partitioned by (value string);
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 -- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
 -- in an attempt to force the generation of multiple splits and multiple output files.
 -- However, Hadoop 0.20 is incapable of generating splits smaller than the block size

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/combine2_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine2_hadoop20.q b/ql/src/test/queries/clientpositive/combine2_hadoop20.q
deleted file mode 100644
index 3f45ae5..0000000
--- a/ql/src/test/queries/clientpositive/combine2_hadoop20.q
+++ /dev/null
@@ -1,50 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
-USE default;
-
-set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
-set hive.exec.dynamic.partition=true;
-set hive.exec.dynamic.partition.mode=nonstrict;
-set mapred.cache.shared.enabled=false;
-set hive.merge.smallfiles.avgsize=0;
-
--- SORT_QUERY_RESULTS
-
-create table combine2(key string) partitioned by (value string);
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
--- in an attempt to force the generation of multiple splits and multiple output files.
--- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
--- when using CombineFileInputFormat, so only one split is generated. This has a
--- significant impact on the results results of this test.
--- This issue was fixed in MAPREDUCE-2046 which is included in 0.22.
-
-insert overwrite table combine2 partition(value) 
-select * from (
-   select key, value from src where key < 10
-   union all 
-   select key, '|' as value from src where key = 11
-   union all
-   select key, '2010-04-21 09:45:00' value from src where key = 19) s;
-
-show partitions combine2;
-
-explain
-select key, value from combine2 where value is not null;
-
-select key, value from combine2 where value is not null;
-
-explain extended
-select count(1) from combine2 where value is not null;
-
-select count(1) from combine2 where value is not null;
-
-explain
-select ds, count(1) from srcpart where ds is not null group by ds;
-
-select ds, count(1) from srcpart where ds is not null group by ds;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/ctas.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ctas.q b/ql/src/test/queries/clientpositive/ctas.q
index dbed475..c4fdda1 100644
--- a/ql/src/test/queries/clientpositive/ctas.q
+++ b/ql/src/test/queries/clientpositive/ctas.q
@@ -1,6 +1,5 @@
 --! qt:dataset:src
 set hive.explain.user=false;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 -- SORT_QUERY_RESULTS
 
 create table nzhang_Tmp(a int, b string);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q b/ql/src/test/queries/clientpositive/groupby_sort_1.q
deleted file mode 100644
index 46ec0be..0000000
--- a/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ /dev/null
@@ -1,283 +0,0 @@
-;
-
-set hive.exec.reducers.max = 10;
-set hive.map.groupby.sorted=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- SORT_QUERY_RESULTS
-
-CREATE TABLE T1_n4(key STRING, val STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n4;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1_n4 select key, val from T1_n4;
-
-CREATE TABLE outputTbl1_n2(key int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key
--- matches the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T1_n4 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T1_n4 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
-CREATE TABLE outputTbl2_n0(key1 int, key2 string, cnt int);
-
--- no map-side group by even if the group by key is a superset of sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2_n0
-SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
-
-INSERT OVERWRITE TABLE outputTbl2_n0
-SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
-
-SELECT * FROM outputTbl2_n0;
-
--- It should work for sub-queries
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
--- It should work for sub-queries with column aliases
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k;
-
-SELECT * FROM outputTbl1_n2;
-
-CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant followed
--- by a match to the sorted key
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
-
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
-
-SELECT * FROM outputTbl3;
-
-CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
-
--- no map-side group by if the group by key contains a constant followed by another column
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4;
-
--- no map-side group by if the group by key contains a function
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
-
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
-
-SELECT * FROM outputTbl3;
-
--- it should not matter what follows the group by
--- test various cases
-
--- group by followed by another group by
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-group by key + key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-group by key + key;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a union
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-  UNION ALL
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-  UNION ALL
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-) subq1;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a union where one of the sub-queries is map-side group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-  UNION ALL
-SELECT key + key as key, count(1) FROM T1_n4 GROUP BY key + key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key
-  UNION ALL
-SELECT key + key as key, count(1) as cnt FROM T1_n4 GROUP BY key + key
-) subq1;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a join
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a join where one of the sub-queries can be performed in the mapper
-EXPLAIN EXTENDED 
-SELECT * FROM 
-(SELECT key, count(1) FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val) subq2
-ON subq1.key = subq2.key;
-
-CREATE TABLE T2_n3(key STRING, val STRING)
-CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2_n3 select key, val from T1_n4;
-
--- no map-side sort group by if the group by is a prefix of the sorted key
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T2_n3 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T2_n3 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4;
-
-CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys followed by anything
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2;
-
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2;
-
-SELECT * FROM outputTbl5;
-
--- constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, constant, val, count(1) from 
-(SELECT key, 1 as constant, val from T2_n3)subq
-group by key, constant, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, constant, val, count(1) from 
-(SELECT key, 1 as constant, val from T2_n3)subq
-group by key, constant, val;
-
-SELECT * FROM outputTbl4;
-
--- multiple levels of constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from 
-(SELECT key, 1 as constant, val from T2_n3)subq
-)subq2
-group by key, constant3, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from 
-(SELECT key, 1 as constant, val from T2_n3)subq
-)subq2
-group by key, constant3, val;
-
-SELECT * FROM outputTbl4;
-
-set hive.map.aggr=true;
-set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
-
-CREATE TABLE DEST1_n7(key INT, cnt INT);
-CREATE TABLE DEST2_n1(key INT, val STRING, cnt INT);
-
-SET hive.exec.compress.intermediate=true;
-SET hive.exec.compress.output=true; 
-
-EXPLAIN
-FROM T2_n3
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM T2_n3
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n7;
-select * from DEST2_n1;
-
--- multi-table insert with a sub-query
-EXPLAIN
-FROM (select key, val from T2_n3 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM (select key, val from T2_n3 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n7;
-select * from DEST2_n1;

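The file removed above, groupby_sort_1.q, exercised Hive's map-side group by
on bucketed, sorted tables. The core pattern it tested, as a minimal sketch
(the table name t_sorted is illustrative, not taken from the test suite):

  set hive.map.groupby.sorted=true;

  CREATE TABLE t_sorted (key STRING, val STRING)
  CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

  -- Because the group-by key matches the table's sort key, the aggregation
  -- can be finished on the map side, with no reduce-side group-by stage.
  EXPLAIN
  SELECT key, count(1) FROM t_sorted GROUP BY key;

The same coverage survives in groupby_sort_1_23.q, whose diff follows.
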
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
index b27aec4..c97fcdd 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE T1_n80(key STRING, val STRING)

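The EXCLUDE_HADOOP_MAJOR_VERSIONS / INCLUDE_HADOOP_MAJOR_VERSIONS lines being
stripped throughout this patch were magic comments that the q-test driver
(QTestUtil, trimmed in this same commit) read to decide whether a test applied
to a given Hadoop major version, for example:

  -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)    skip this test on Hadoop 0.20S
  -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)     run this test only on Hadoop 0.23

With a single supported Hadoop line left, the markers no longer select
anything, so they go away together with the *_hadoop20.q duplicates.
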
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
deleted file mode 100644
index 7836c4d..0000000
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ /dev/null
@@ -1,285 +0,0 @@
-;
-
-set hive.exec.reducers.max = 10;
-set hive.map.groupby.sorted=true;
-set hive.groupby.skewindata=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- SORT_QUERY_RESULTS
-
-CREATE TABLE T1_n35(key STRING, val STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n35;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1_n35 select key, val from T1_n35;
-
-CREATE TABLE outputTbl1_n8(key int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key
--- matches the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T1_n35 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T1_n35 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
-CREATE TABLE outputTbl2_n2(key1 int, key2 string, cnt int);
-
--- no map-side group by even if the group by key is a superset of the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2_n2
-SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-
-INSERT OVERWRITE TABLE outputTbl2_n2
-SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-
-SELECT * FROM outputTbl2_n2;
-
--- It should work for sub-queries
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
--- It should work for sub-queries with column aliases
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-
-SELECT * FROM outputTbl1_n8;
-
-CREATE TABLE outputTbl3_n0(key1 int, key2 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant followed
--- by a match to the sorted key
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-
-SELECT * FROM outputTbl3_n0;
-
-CREATE TABLE outputTbl4_n0(key1 int, key2 int, key3 string, cnt int);
-
--- no map-side group by if the group by key contains a constant followed by another column
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4_n0;
-
--- no map-side group by if the group by key contains a function
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-
-SELECT * FROM outputTbl3_n0;
-
--- it should not matter what follows the group by
--- test various cases
-
--- group by followed by another group by
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-group by key + key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-group by key + key;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a union
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-  UNION ALL
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-  UNION ALL
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-) subq1;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a union where one of the sub-queries is map-side group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-  UNION ALL
-SELECT key + key as key, count(1) FROM T1_n35 GROUP BY key + key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key
-  UNION ALL
-SELECT key + key as key, count(1) as cnt FROM T1_n35 GROUP BY key + key
-) subq1;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a join
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a join where one of the sub-queries can be performed in the mapper
-EXPLAIN EXTENDED 
-SELECT * FROM 
-(SELECT key, count(1) FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val) subq2
-ON subq1.key = subq2.key;
-
-CREATE TABLE T2_n23(key STRING, val STRING)
-CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2_n23 select key, val from T1_n35;
-
--- no map-side sort group by if the group by is a prefix of the sorted key
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T2_n23 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T2_n23 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4_n0;
-
-CREATE TABLE outputTbl5_n0(key1 int, key2 int, key3 string, key4 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys followed by anything
-EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl5_n0
-SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-
-INSERT OVERWRITE TABLE outputTbl5_n0
-SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-
-SELECT * FROM outputTbl5_n0 
-ORDER BY key1, key2, key3, key4;
-
--- constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, constant, val, count(1) from 
-(SELECT key, 1 as constant, val from T2_n23)subq
-group by key, constant, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, constant, val, count(1) from 
-(SELECT key, 1 as constant, val from T2_n23)subq
-group by key, constant, val;
-
-SELECT * FROM outputTbl4_n0;
-
--- multiple levels of constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from 
-(SELECT key, 1 as constant, val from T2_n23)subq
-)subq2
-group by key, constant3, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from 
-(SELECT key, 1 as constant, val from T2_n23)subq
-)subq2
-group by key, constant3, val;
-
-SELECT * FROM outputTbl4_n0;
-
-set hive.map.aggr=true;
-set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
-
-CREATE TABLE DEST1_n30(key INT, cnt INT);
-CREATE TABLE DEST2_n6(key INT, val STRING, cnt INT);
-
-SET hive.exec.compress.intermediate=true;
-SET hive.exec.compress.output=true; 
-
-EXPLAIN
-FROM T2_n23
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM T2_n23
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n30;
-select * from DEST2_n6;
-
--- multi-table insert with a sub-query
-EXPLAIN
-FROM (select key, val from T2_n23 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM (select key, val from T2_n23 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n30;
-select * from DEST2_n6;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
index 8919f3b..f5a2c59 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
@@ -3,7 +3,6 @@ set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE T1_n56(key STRING, val STRING)

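groupby_sort_skew_1_23.q, kept above, layers hive.groupby.skewindata on top of
the sorted-table optimization. With that flag on, Hive plans a skewed
aggregation as two consecutive group-by stages: the first distributes map
output randomly to compute partial aggregates, the second merges them by the
group key. A minimal sketch (the table name t_skew is illustrative):

  set hive.map.groupby.sorted=true;
  set hive.groupby.skewindata=true;

  -- Expect two group-by stages in the plan: the extra shuffle trades
  -- latency for balanced reducer load under key skew.
  EXPLAIN
  SELECT key, count(1) FROM t_skew GROUP BY key;
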
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
index 5479468..c9fd712 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
@@ -6,7 +6,6 @@ set mapred.input.dir.recursive=true;
 
 -- This tests that bucketing/sorting metadata is not inferred for tables with list bucketing
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
 CREATE TABLE list_bucketing_table (key STRING, value STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12.q b/ql/src/test/queries/clientpositive/input12.q
index b75ce20..16698ed 100644
--- a/ql/src/test/queries/clientpositive/input12.q
+++ b/ql/src/test/queries/clientpositive/input12.q
@@ -4,7 +4,6 @@ set mapreduce.framework.name=yarn;
 set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
 CREATE TABLE dest1_n122(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE dest2_n32(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input12_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12_hadoop20.q b/ql/src/test/queries/clientpositive/input12_hadoop20.q
deleted file mode 100644
index e9f2baf..0000000
--- a/ql/src/test/queries/clientpositive/input12_hadoop20.q
+++ /dev/null
@@ -1,24 +0,0 @@
---! qt:dataset:src
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-
-CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2_n23(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest3_n2(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
-
-EXPLAIN
-FROM src 
-INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-
-FROM src 
-INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-
-SELECT dest1_n88.* FROM dest1_n88;
-SELECT dest2_n23.* FROM dest2_n23;
-SELECT dest3_n2.* FROM dest3_n2;

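The deleted input12_hadoop20.q differed from the surviving input12.q mainly in
which job-tracker properties it set to trigger automatic local-mode execution;
both spellings appear verbatim in the diffs above:

  -- retired Hadoop 0.20 property:
  set mapred.job.tracker=localhost:58;
  set hive.exec.mode.local.auto=true;

  -- YARN / MRv2 properties kept in input12.q:
  set mapreduce.framework.name=yarn;
  set mapreduce.jobtracker.address=localhost:58;
  set hive.exec.mode.local.auto=true;
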
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input39.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39.q b/ql/src/test/queries/clientpositive/input39.q
index b757f8e..2efdbc6 100644
--- a/ql/src/test/queries/clientpositive/input39.q
+++ b/ql/src/test/queries/clientpositive/input39.q
@@ -1,5 +1,4 @@
 --! qt:dataset:src
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
 
 create table t1_n121(key string, value string) partitioned by (ds string);

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input39_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39_hadoop20.q b/ql/src/test/queries/clientpositive/input39_hadoop20.q
deleted file mode 100644
index 26f2a6e..0000000
--- a/ql/src/test/queries/clientpositive/input39_hadoop20.q
+++ /dev/null
@@ -1,31 +0,0 @@
---! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-
-create table t1_n77(key string, value string) partitioned by (ds string);
-create table t2_n46(key string, value string) partitioned by (ds string);
-
-insert overwrite table t1_n77 partition (ds='1')
-select key, value from src;
-
-insert overwrite table t1_n77 partition (ds='2')
-select key, value from src;
-
-insert overwrite table t2_n46 partition (ds='1')
-select key, value from src;
-
-set hive.test.mode=true;
-set hive.mapred.mode=strict;
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-explain
-select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
-
-select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
-
-set hive.test.mode=false;
-set mapred.job.tracker;
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/join14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14.q b/ql/src/test/queries/clientpositive/join14.q
index e0f725c..20b914e 100644
--- a/ql/src/test/queries/clientpositive/join14.q
+++ b/ql/src/test/queries/clientpositive/join14.q
@@ -2,7 +2,6 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 
 CREATE TABLE dest1_n164(c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/join14_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14_hadoop20.q b/ql/src/test/queries/clientpositive/join14_hadoop20.q
deleted file mode 100644
index 489ad0c..0000000
--- a/ql/src/test/queries/clientpositive/join14_hadoop20.q
+++ /dev/null
@@ -1,17 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-CREATE TABLE dest1_n49(c1 INT, c2 STRING) STORED AS TEXTFILE;
-
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-EXPLAIN
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
-
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
-
-select dest1_n49.* from dest1_n49;

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/lb_fs_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lb_fs_stats.q b/ql/src/test/queries/clientpositive/lb_fs_stats.q
index 7cadaf9..3bc3924 100644
--- a/ql/src/test/queries/clientpositive/lb_fs_stats.q
+++ b/ql/src/test/queries/clientpositive/lb_fs_stats.q
@@ -7,7 +7,6 @@ set mapred.input.dir.recursive=true;
 set hive.stats.dbclass=fs;
 -- Tests truncating a column from a list bucketing table
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
 

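lb_fs_stats.q pins hive.stats.dbclass=fs, the filesystem-backed statistics
publisher: tasks write their stats to temporary files that are aggregated when
the job finishes, so the test needs no stats database. The relevant setting,
as it appears in the file:

  set hive.stats.dbclass=fs;
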
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
index 23e303f..40d5393 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
 
 -- list bucketing DML : dynamic partition and 2 stage query plan.
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
 create table list_bucketing_dynamic_part_n0 (key String, value String) 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
index e0acf2a..7cac6d7 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
 
 -- Ensure it works if skewed column is not the first column in the table columns
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- list bucketing DML: static partition. multiple skewed columns.
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
index d81355a..be02096 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
 
 -- Ensure it works if skewed column is not the first column in the table columns
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
index 091cf0c..77b010d 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
 
 -- Ensure skewed value map has escaped directory name
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_14.q b/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
index a0f9c2c..f640853 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
 
 -- list bucketing DML : unpartitioned table and 2 stage query plan.
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
 create table list_bucketing (key String, value String)

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
index b80e51d..6a46828 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
@@ -9,7 +9,6 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.stats.reliable=true;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- list bucketing DML: static partition. multiple skewed columns.

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
index 08c8ce2..0c1e43a 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
 
 -- list bucketing DML : static partition and 2 stage query plan.
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
 create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES;

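The list_bucket_* tests in this patch all build on the same skewed-table DDL,
shown in full in the list_bucket_dml_3.q hunk just above. As a generic sketch
(the table name is illustrative):

  -- Rows carrying the declared skewed values land in dedicated
  -- subdirectories, letting queries that filter on them prune files.
  create table list_bucketing_demo (key String, value String)
  partitioned by (ds String, hr String)
  skewed by (key) on ("484") stored as DIRECTORIES;
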
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
index a13915e..f14efe2 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
 set hive.merge.mapfiles=false;	
 set hive.merge.mapredfiles=false;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- list bucketing DML: static partition. multiple skewed columns. merge.

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
index bbfb317..996fa1d 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
 
 -- list bucketing DML: multiple skewed columns. 2 stages
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- create a skewed table

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
index b9a526b..e761c86 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
@@ -46,7 +46,6 @@ set hive.merge.mapredfiles=false;
 -- with merge
 -- 118 000002_0 
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- create a skewed table

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_7.q b/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
index 2c96407..ce87e4f 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
@@ -9,7 +9,6 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.merge.rcfile.block.level=true;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- list bucketing DML : dynamic partition (one level) , merge , one skewed column

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index 87f2624..ca19d79 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -48,7 +48,6 @@ set hive.merge.mapredfiles=false;
 -- with merge
 -- 118 000002_0 
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
 create table list_bucketing_dynamic_part_n2 (key String, value String) 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index e130f05..70ef57b 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
 set hive.merge.mapfiles=false;	
 set hive.merge.mapredfiles=false;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
 
 -- list bucketing DML: static partition. multiple skewed columns. merge.

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
index 9e377d5..b8bf944 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
@@ -6,7 +6,6 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)	
 
 -- List bucketing query logic test case. We simulate the directory structure by DML here. 
 -- Test condition: