Posted to commits@hive.apache.org by ha...@apache.org on 2015/10/23 18:29:01 UTC

hive git commit: HIVE-12224 : Remove HOLD_DDLTIME (Ashutosh Chauhan via Pengcheng Xiong)

Repository: hive
Updated Branches:
  refs/heads/master 27ee7b559 -> 87989da55


HIVE-12224 : Remove HOLD_DDLTIME (Ashutosh Chauhan via Pengcheng Xiong)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>
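
For background: the HOLD_DDLTIME hint (written /*+ HOLD_DDLTIME */ in a
SELECT) suppressed the update of a table's or partition's
transient_lastDdlTime property when data was loaded into it. This change
removes the hint's lexer and parser tokens, the checkHoldDDLTime() analysis,
the LoadTableDesc flag, the corresponding ErrorMsg entry, and the holdDDLTime
parameter threaded through Hive.loadTable, loadPartition, and
loadDynamicPartitions, along with the ddltime.q tests.

A minimal before/after sketch of a Hive.loadTable caller, based on the
signatures in this diff; the Hive handle, path, and table name are
illustrative, not part of the commit:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class LoadTableSketch {
      static void load(Hive db, Path dataFile) throws HiveException {
        // Before HIVE-12224 the 4th argument was holdDDLTime:
        //   db.loadTable(dataFile, "src", false /*replace*/, false /*holdDDLTime*/,
        //       false /*isSrcLocal*/, false /*isSkewedStoreAsSubdir*/, false /*isAcid*/);
        // After HIVE-12224 the flag is gone:
        db.loadTable(dataFile, "src",
            false /*replace*/,
            false /*isSrcLocal*/,
            false /*isSkewedStoreAsSubdir*/,
            false /*isAcid*/);
      }
    }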


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/87989da5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/87989da5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/87989da5

Branch: refs/heads/master
Commit: 87989da5548585f1b0d439e8fb9047ede28451e4
Parents: 27ee7b5
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Oct 21 13:36:49 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Oct 23 09:25:10 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/history/TestHiveHistory.java |   2 +-
 .../test/resources/testconfiguration.properties |   1 -
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   4 -
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   8 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  61 +++---
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   1 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   1 -
 .../hadoop/hive/ql/parse/SelectClauseParser.g   |   1 -
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  46 -----
 .../hadoop/hive/ql/plan/LoadTableDesc.java      |  14 --
 .../hadoop/hive/ql/exec/TestExecDriver.java     |   2 +-
 ql/src/test/queries/clientnegative/ddltime.q    |   6 -
 ql/src/test/queries/clientpositive/ddltime.q    |  45 -----
 .../test/results/clientnegative/ddltime.q.out   |   9 -
 .../test/results/clientpositive/ddltime.q.out   | 188 -------------------
 15 files changed, 31 insertions(+), 358 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 76c1636..c046708 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ public class TestHiveHistory extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index fa1d89d..13efc58 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -469,7 +469,6 @@ beeline.positive.exclude=add_part_exist.q,\
   database.q,\
   database_location.q,\
   database_properties.q,\
-  ddltime.q,\
   describe_database_json.q,\
   drop_database_removes_partition_dirs.q,\
   escape1.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 87c2830..c080570 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -25,8 +25,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.antlr.runtime.tree.Tree;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
 
@@ -192,8 +190,6 @@ public enum ErrorMsg {
   UNARCHIVE_ON_MULI_PARTS(10109, "ARCHIVE can only be run on a single partition"),
   ARCHIVE_ON_TABLE(10110, "ARCHIVE can only be run on partitions"),
   RESERVED_PART_VAL(10111, "Partition value contains a reserved substring"),
-  HOLD_DDLTIME_ON_NONEXIST_PARTITIONS(10112, "HOLD_DDLTIME hint cannot be applied to dynamic " +
-                                      "partitions or non-existent partitions"),
   OFFLINE_TABLE_OR_PARTITION(10113, "Query against an offline table or partition"),
   OUTERJOIN_USES_FILTERS(10114, "The query results could be wrong. " +
                          "Turn on hive.outerjoin.supports.filters"),

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 7e257e5..920bb1c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -310,7 +310,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         if (tbd.getPartitionSpec().size() == 0) {
           dc = new DataContainer(table.getTTable());
           db.loadTable(tbd.getSourcePath(), tbd.getTable()
-              .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime(), work.isSrcLocal(),
+              .getTableName(), tbd.getReplace(), work.isSrcLocal(),
               isSkewedStoredAsDirs(tbd),
               work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
           if (work.getOutputs() != null) {
@@ -392,7 +392,6 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 tbd.getPartitionSpec(),
                 tbd.getReplace(),
                 dpCtx.getNumDPCols(),
-                tbd.getHoldDDLTime(),
                 isSkewedStoredAsDirs(tbd),
                 work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
                 SessionState.get().getTxnMgr().getCurrentTxnId());
@@ -451,11 +450,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 tbd.getPartitionSpec());
             db.validatePartitionNameCharacters(partVals);
             db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
-                tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
+                tbd.getPartitionSpec(), tbd.getReplace(),
                 tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
                 work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
-            Partition partn = db.getPartition(table, tbd.getPartitionSpec(),
-                false);
+            Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 
             if (bucketCols != null || sortCols != null) {
               updatePartitionBucketSortColumns(table, partn, bucketCols,

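The dynamic-partition branch above now calls the trimmed
Hive.loadDynamicPartitions (see the Hive.java hunk below). A hedged
caller-side sketch; table name, partition spec, and flag values are
illustrative, and per the javadoc in this diff txnId can be 0 when isAcid is
false:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;

    public class DynamicLoadSketch {
      static Map<Map<String, String>, Partition> load(Hive db, Path loadPath)
          throws HiveException {
        Map<String, String> partSpec = new LinkedHashMap<String, String>();
        partSpec.put("ds", "2010-06-21"); // static partition column
        partSpec.put("hr", null);         // dynamic column (assumed null-value convention)
        // holdDDLTime, formerly between numDP and listBucketingEnabled, is gone:
        return db.loadDynamicPartitions(loadPath, "t2", partSpec,
            true  /*replace*/,
            1     /*numDP*/,
            false /*listBucketingEnabled*/,
            false /*isAcid*/,
            0L    /*txnId*/);
      }
    }
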
http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 4e3be0d..c64d8d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -112,7 +112,6 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -1378,11 +1377,11 @@ public class Hive {
   }
 
   public void loadPartition(Path loadPath, String tableName,
-      Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
+      Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
       boolean isSrcLocal, boolean isAcid) throws HiveException {
     Table tbl = getTable(tableName);
-    loadPartition(loadPath, tbl, partSpec, replace, holdDDLTime, inheritTableSpecs,
+    loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
         isSkewedStoreAsSubdir, isSrcLocal, isAcid);
   }
 
@@ -1401,7 +1400,6 @@ public class Hive {
    * @param replace
    *          if true - replace files in the partition, otherwise add files to
    *          the partition
-   * @param holdDDLTime if true, force [re]create the partition
    * @param inheritTableSpecs if true, on [re]creating the partition, take the
    *          location/inputformat/outputformat/serde details from table spec
    * @param isSrcLocal
@@ -1409,7 +1407,7 @@ public class Hive {
    * @param isAcid true if this is an ACID operation
    */
   public Partition loadPartition(Path loadPath, Table tbl,
-      Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
+      Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
       boolean isSrcLocal, boolean isAcid) throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
@@ -1464,26 +1462,24 @@ public class Hive {
         Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
       }
 
-      boolean forceCreate = (!holdDDLTime) ? true : false;
-      newTPart = getPartition(tbl, partSpec, forceCreate, newPartPath.toString(),
+      newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(),
           inheritTableSpecs, newFiles);
       // recreate the partition if it existed before
-      if (!holdDDLTime) {
-        if (isSkewedStoreAsSubdir) {
-          org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
-          SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
-          /* Construct list bucketing location mappings from sub-directory name. */
-          Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
-              newPartPath, skewedInfo);
-          /* Add list bucketing location mappings. */
-          skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
-          newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
-          alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart));
-          newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs,
-              newFiles);
-          return new Partition(tbl, newCreatedTpart);
-        }
+      if (isSkewedStoreAsSubdir) {
+        org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
+        SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
+        /* Construct list bucketing location mappings from sub-directory name. */
+        Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
+            newPartPath, skewedInfo);
+        /* Add list bucketing location mappings. */
+        skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
+        newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
+        alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart));
+        newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs,
+            newFiles);
+        return new Partition(tbl, newCreatedTpart);
       }
+
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -1589,7 +1585,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param partSpec
    * @param replace
    * @param numDP number of dynamic partitions
-   * @param holdDDLTime
    * @param listBucketingEnabled
    * @param isAcid true if this is an ACID operation
    * @param txnId txnId, can be 0 unless isAcid == true
@@ -1598,7 +1593,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(Path loadPath,
       String tableName, Map<String, String> partSpec, boolean replace,
-      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid, long txnId)
+      int numDP, boolean listBucketingEnabled, boolean isAcid, long txnId)
       throws HiveException {
 
     Set<Path> validPartitions = new HashSet<Path>();
@@ -1661,7 +1656,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>(partSpec);
         Warehouse.makeSpecFromName(fullPartSpec, partPath);
         Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace,
-            holdDDLTime, true, listBucketingEnabled, false, isAcid);
+            true, listBucketingEnabled, false, isAcid);
         partitionsMap.put(fullPartSpec, newPartition);
         if (inPlaceEligible) {
           InPlaceUpdates.rePositionCursor(ps);
@@ -1696,7 +1691,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
    *          name of table to be loaded.
    * @param replace
    *          if true - replace files in the table, otherwise add files to table
-   * @param holdDDLTime
    * @param isSrcLocal
    *          If the source directory is LOCAL
    * @param isSkewedStoreAsSubdir
@@ -1704,7 +1698,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param isAcid true if this is an ACID based write
    */
   public void loadTable(Path loadPath, String tableName, boolean replace,
-      boolean holdDDLTime, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid)
+      boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid)
       throws HiveException {
     List<Path> newFiles = new ArrayList<Path>();
     Table tbl = getTable(tableName);
@@ -1737,13 +1731,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throw new HiveException(e);
     }
 
-    if (!holdDDLTime) {
-      try {
-        alterTable(tableName, tbl);
-      } catch (InvalidOperationException e) {
-        throw new HiveException(e);
-      }
+    try {
+      alterTable(tableName, tbl);
+    } catch (InvalidOperationException e) {
+      throw new HiveException(e);
     }
+
     fireInsertEvent(tbl, null, newFiles);
   }
 
@@ -2934,8 +2927,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
     try {
 
       FileSystem destFs = destf.getFileSystem(conf);
-      boolean inheritPerms = HiveConf.getBoolVar(conf,
-          HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
 
       // check if srcf contains nested sub-directories
       FileStatus[] srcs;

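Caller-side, the trimmed loadPartition now takes replace followed directly by
inheritTableSpecs. A minimal sketch; the partition values are borrowed from
the deleted ddltime.q test, everything else is illustrative:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class LoadPartitionSketch {
      static void load(Hive db, Path partData) throws HiveException {
        Map<String, String> partSpec = new LinkedHashMap<String, String>();
        partSpec.put("ds", "2010-06-21");
        partSpec.put("hr", "1");
        // holdDDLTime, formerly between replace and inheritTableSpecs, is gone:
        db.loadPartition(partData, "t2", partSpec,
            true  /*replace*/,
            true  /*inheritTableSpecs*/,
            false /*isSkewedStoreAsSubdir*/,
            false /*isSrcLocal*/,
            false /*isAcid*/);
      }
    }
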
http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 9f8cfd1..e9fbfb1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -199,7 +199,6 @@ KW_ELSE: 'ELSE';
 KW_END: 'END';
 KW_MAPJOIN: 'MAPJOIN';
 KW_STREAMTABLE: 'STREAMTABLE';
-KW_HOLD_DDLTIME: 'HOLD_DDLTIME';
 KW_CLUSTERSTATUS: 'CLUSTERSTATUS';
 KW_UTC: 'UTC';
 KW_UTCTIMESTAMP: 'UTC_TMESTAMP';

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 5eededd..d8fb83d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -257,7 +257,6 @@ TOK_HINTLIST;
 TOK_HINT;
 TOK_MAPJOIN;
 TOK_STREAMTABLE;
-TOK_HOLD_DDLTIME;
 TOK_HINTARGLIST;
 TOK_USERSCRIPTCOLNAMES;
 TOK_USERSCRIPTCOLSCHEMA;

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
index 1dcf392..48bc8b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
@@ -107,7 +107,6 @@ hintName
     :
     KW_MAPJOIN -> TOK_MAPJOIN
     | KW_STREAMTABLE -> TOK_STREAMTABLE
-    | KW_HOLD_DDLTIME -> TOK_HOLD_DDLTIME
     ;
 
 hintArgs

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3262887..f47428c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6141,25 +6141,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       ctx.setPartnCols(partnColsNoConvert);
     }
   }
-  /**
-   * Check for HOLD_DDLTIME hint.
-   *
-   * @param qb
-   * @return true if HOLD_DDLTIME is set, false otherwise.
-   */
-  private boolean checkHoldDDLTime(QB qb) {
-    ASTNode hints = qb.getParseInfo().getHints();
-    if (hints == null) {
-      return false;
-    }
-    for (int pos = 0; pos < hints.getChildCount(); pos++) {
-      ASTNode hint = (ASTNode) hints.getChild(pos);
-      if (((ASTNode) hint.getChild(0)).getToken().getType() == HiveParser.TOK_HOLD_DDLTIME) {
-        return true;
-      }
-    }
-    return false;
-  }
 
   @SuppressWarnings("nls")
   protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
@@ -6181,7 +6162,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     SortBucketRSCtx rsCtx = new SortBucketRSCtx();
     DynamicPartitionCtx dpCtx = null;
     LoadTableDesc ltd = null;
-    boolean holdDDLTime = checkHoldDDLTime(qb);
     ListBucketingCtx lbCtx = null;
 
     switch (dest_type.intValue()) {
@@ -6228,13 +6208,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               qb.getParseInfo().getDestForClause(dest),
               ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
         }
-        // the HOLD_DDLTIIME hint should not be used with dynamic partition since the
-        // newly generated partitions should always update their DDLTIME
-        if (holdDDLTime) {
-          throw new SemanticException(generateErrorMessage(
-              qb.getParseInfo().getDestForClause(dest),
-              ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg()));
-        }
         dpCtx = qbm.getDPCtx(dest);
         if (dpCtx == null) {
           dest_tab.validatePartColumnNames(partSpec, false);
@@ -6294,11 +6267,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
             dest_tab.getTableName()));
         ltd.setLbCtx(lbCtx);
-
-        if (holdDDLTime) {
-          LOG.info("this query will not update transient_lastDdlTime!");
-          ltd.setHoldDDLTime(true);
-        }
         loadTableWork.add(ltd);
       }
 
@@ -6404,20 +6372,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           dest_tab.getTableName()));
       ltd.setLbCtx(lbCtx);
 
-      if (holdDDLTime) {
-        try {
-          Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false);
-          if (part == null) {
-            throw new SemanticException(generateErrorMessage(
-                qb.getParseInfo().getDestForClause(dest),
-                ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg()));
-          }
-        } catch (HiveException e) {
-          throw new SemanticException(e);
-        }
-        LOG.info("this query will not update transient_lastDdlTime!");
-        ltd.setHoldDDLTime(true);
-      }
       loadTableWork.add(ltd);
       if (!outputs.add(new WriteEntity(dest_part, (ltd.getReplace() ?
           WriteEntity.WriteType.INSERT_OVERWRITE :

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 3e74d95..427aac1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -36,7 +36,6 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
   private boolean replace;
   private DynamicPartitionCtx dpCtx;
   private ListBucketingCtx lbCtx;
-  private boolean holdDDLTime;
   private boolean inheritTableSpecs = true; //For partitions, flag controlling whether the current
                                             //table specs are to be used
   // Need to remember whether this is an acid compliant operation, and if so whether it is an
@@ -47,10 +46,6 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
   private Map<String, String> partitionSpec; // NOTE: this partitionSpec has to be ordered map
 
-  public LoadTableDesc() {
-    this.holdDDLTime = false;
-  }
-
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
@@ -114,18 +109,9 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
     this.table = table;
     this.partitionSpec = partitionSpec;
     this.replace = replace;
-    this.holdDDLTime = false;
     this.writeType = writeType;
   }
 
-  public void setHoldDDLTime(boolean ddlTime) {
-    holdDDLTime = ddlTime;
-  }
-
-  public boolean getHoldDDLTime() {
-    return holdDDLTime;
-  }
-
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public TableDesc getTable() {
     return table;

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index ca59e90..e6d3b29 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -139,7 +139,7 @@ public class TestExecDriver extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, true, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, true, false, false);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/test/queries/clientnegative/ddltime.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/ddltime.q b/ql/src/test/queries/clientnegative/ddltime.q
deleted file mode 100644
index 3517a60..0000000
--- a/ql/src/test/queries/clientnegative/ddltime.q
+++ /dev/null
@@ -1,6 +0,0 @@
-
-create table T2 like srcpart;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/test/queries/clientpositive/ddltime.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ddltime.q b/ql/src/test/queries/clientpositive/ddltime.q
deleted file mode 100644
index 3eead6f..0000000
--- a/ql/src/test/queries/clientpositive/ddltime.q
+++ /dev/null
@@ -1,45 +0,0 @@
-create table T1 like src;
-
-desc extended T1;
-
-!sleep 1;
-insert overwrite table T1 select * from src;
-
-desc extended T1;
-
-!sleep 1;
-
-insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src;
-
-desc extended T1;
-
-!sleep 1;
-
-insert overwrite table T1 select * from src;
-
-desc extended T1;
-
-
-
-create table if not exists T2 like srcpart;
-desc extended T2;
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10;
-
-desc extended T2 partition (ds = '2010-06-21', hr = '1');
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
-
-desc extended T2 partition (ds = '2010-06-21', hr = '1');
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10;
-
-desc extended T2 partition(ds='2010-06-01', hr='1');
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/test/results/clientnegative/ddltime.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/ddltime.q.out b/ql/src/test/results/clientnegative/ddltime.q.out
deleted file mode 100644
index 25d9af6..0000000
--- a/ql/src/test/results/clientnegative/ddltime.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table T2 like srcpart
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table T2 like srcpart
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: 3:23 HOLD_DDLTIME hint cannot be applied to dynamic partitions or non-existent partitions. Error encountered near token ''1''

http://git-wip-us.apache.org/repos/asf/hive/blob/87989da5/ql/src/test/results/clientpositive/ddltime.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ddltime.q.out b/ql/src/test/results/clientpositive/ddltime.q.out
deleted file mode 100644
index ec8938d..0000000
--- a/ql/src/test/results/clientpositive/ddltime.q.out
+++ /dev/null
@@ -1,188 +0,0 @@
-PREHOOK: query: create table T1 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: create table T1 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key                 	string              	default             
-value               	string              	default             
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key                 	string              	default             
-value               	string              	default             
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key                 	string              	default             
-value               	string              	default             
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key                 	string              	default             
-value               	string              	default             
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: create table if not exists T2 like srcpart
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table if not exists T2 like srcpart
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: desc extended T2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key                 	string              	default             
-value               	string              	default             
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key                 	string              	default             
-value               	string              	default             
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key                 	string              	default             
-value               	string              	default             
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-01/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-01/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key                 	string              	default             
-value               	string              	default             
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-#### A masked pattern was here ####