Posted to commits@hive.apache.org by sp...@apache.org on 2017/05/09 15:07:49 UTC

hive git commit: HIVE-16469: Parquet timestamp table property is not always taken into account (Barna Zsombor Klara, reviewed by Sergio Pena, Ferdinand Xu)

Repository: hive
Updated Branches:
  refs/heads/master 15bfc0ebc -> 78e29fc70


HIVE-16469: Parquet timestamp table property is not always taken into account (Barna Zsombor Klara, reviewed by Sergio Pena, Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/78e29fc7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/78e29fc7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/78e29fc7

Branch: refs/heads/master
Commit: 78e29fc70dacec498c35dc556dd7403e4c9f48fe
Parents: 15bfc0e
Author: Barna Zsombor Klara <zs...@cloudera.com>
Authored: Tue May 9 10:06:19 2017 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Tue May 9 10:06:19 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  15 +-
 .../hadoop/hive/ql/exec/FetchOperator.java      |   6 +
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |   5 +
 .../hadoop/hive/ql/exec/mr/MapRedTask.java      |   2 +-
 .../ql/io/parquet/MapredParquetInputFormat.java |  47 ++
 .../ql/io/parquet/ParquetRecordReaderBase.java  |   3 +-
 .../ql/io/parquet/serde/ParquetHiveSerDe.java   |   9 +
 .../ql/io/parquet/serde/ParquetTableUtils.java  |  22 +
 .../ql/io/parquet/timestamp/NanoTimeUtils.java  |   5 +-
 .../io/parquet/AbstractTestParquetDirect.java   |   7 +-
 .../io/parquet/TestParquetRowGroupFilter.java   |   3 +
 .../parquet/VectorizedColumnReaderTestBase.java |  13 +
 .../io/parquet/timestamp/TestNanoTimeUtils.java |  12 +-
 .../parquet_int96_alter_invalid_timezone.q      |   5 +
 .../parquet_int96_create_invalid_timezone.q     |   3 +
 .../clientpositive/parquet_int96_timestamp.q    |  21 +
 .../parquet_int96_alter_invalid_timezone.q.out  |  13 +
 .../parquet_int96_create_invalid_timezone.q.out |   5 +
 .../parquet_int96_timestamp.q.out               | 191 ++++-
 .../spark/parquet_int96_timestamp.q.out         | 718 +++++++++++++++++++
 20 files changed, 1091 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 757b7fc..cf575de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -118,6 +118,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
+import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils;
 import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask;
 import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork;
 import org.apache.hadoop.hive.ql.lockmgr.DbLockManager;
@@ -3815,6 +3816,10 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
               .get(StatsSetupConst.STATS_GENERATED))) {
         environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
       }
+      if (alterTbl.getProps().containsKey(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY)) {
+        NanoTimeUtils.validateTimeZone(
+            alterTbl.getProps().get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY));
+      }
       if (part != null) {
         part.getTPartition().getParameters().putAll(alterTbl.getProps());
       } else {
@@ -4343,13 +4348,15 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
     // If HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE is set to true, then set the timezone of new
     // Parquet tables to UTC by default (only if the table property is not set)
-    if (tbl.getSerializationLib().equals(ParquetHiveSerDe.class.getName())) {
+    if (ParquetHiveSerDe.isParquetTable(tbl)) {
       SessionState ss = SessionState.get();
-      if (ss.getConf().getBoolVar(ConfVars.HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE)) {
-        String parquetTimezone = tbl.getProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
-        if (parquetTimezone == null || parquetTimezone.isEmpty()) {
+      String parquetTimezone = tbl.getProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
+      if (parquetTimezone == null || parquetTimezone.isEmpty()) {
+        if (ss.getConf().getBoolVar(ConfVars.HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE)) {
           tbl.setProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, ParquetTableUtils.PARQUET_INT96_NO_ADJUSTMENT_ZONE);
         }
+      } else {
+        NanoTimeUtils.validateTimeZone(parquetTimezone);
       }
     }
 
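For orientation, here is a minimal sketch (not part of the commit) of the create-table decision order the hunk above establishes. resolveWriteZone and defaultUtc are hypothetical names standing in for the table property lookup and ConfVars.HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE; NanoTimeUtils.validateTimeZone is the validator changed later in this diff.

    static String resolveWriteZone(String tableProp, boolean defaultUtc) {
      if (tableProp == null || tableProp.isEmpty()) {
        // Property absent: fall back to UTC only when the session default asks for it.
        return defaultUtc ? "UTC" : null;
      }
      // Property present: it is now validated regardless of the session default,
      // so an invalid id fails the DDL instead of being silently stored.
      NanoTimeUtils.validateTimeZone(tableProp);
      return tableProp;
    }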

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 13750cd..a575cdd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -42,6 +42,9 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveRecordReader;
+import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
@@ -368,6 +371,9 @@ public class FetchOperator implements Serializable {
 
       Class<? extends InputFormat> formatter = currDesc.getInputFileFormatClass();
       Utilities.copyTableJobPropertiesToConf(currDesc.getTableDesc(), job);
+      if (ParquetHiveSerDe.class.getName().equals(currDesc.getTableDesc().getSerdeClassName())) {
+        ParquetTableUtils.setParquetTimeZoneIfAbsent(job, currDesc.getTableDesc().getProperties());
+      }
       InputFormat inputFormat = getInputFormatFromCache(formatter, job);
 
       InputSplit[] splits = inputFormat.getSplits(job, 1);

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
index 9c3a664..3807f43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
@@ -27,6 +27,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -257,6 +259,9 @@ public class StatsNoJobTask extends Task<StatsNoJobWork> implements Serializable
                 numFiles += 1;
                 statsAvailable = true;
               } else {
+                if (ParquetHiveSerDe.isParquetTable(table)) {
+                  ParquetTableUtils.setParquetTimeZoneIfAbsent(jc, table.getParameters());
+                }
                 org.apache.hadoop.mapred.RecordReader<?, ?> recordReader =
                     inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
                 StatsProvidingRecordReader statsRR;

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
index 1bd4db7..f79a592 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
@@ -69,7 +69,7 @@ public class MapRedTask extends ExecDriver implements Serializable {
   static final String HIVE_DEBUG_RECURSIVE = "HIVE_DEBUG_RECURSIVE";
   static final String HIVE_MAIN_CLIENT_DEBUG_OPTS = "HIVE_MAIN_CLIENT_DEBUG_OPTS";
   static final String HIVE_CHILD_CLIENT_DEBUG_OPTS = "HIVE_CHILD_CLIENT_DEBUG_OPTS";
-  static final String[] HIVE_SYS_PROP = {"build.dir", "build.dir.hive", "hive.query.id"};
+  static final String[] HIVE_SYS_PROP = {"build.dir", "build.dir.hive", "hive.query.id", "user.timezone"};
 
   private transient ContentSummary inputSummary = null;
   private transient boolean runningViaChild = false;
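Adding "user.timezone" to HIVE_SYS_PROP means the child JVM that MapRedTask spawns sees the same default timezone as the parent, so TimeZone.getDefault() resolves consistently on both sides. A hedged sketch of the propagation pattern; childJvmArgs is a hypothetical name, the real wiring lives in the task's command-line construction:

    // Forward selected system properties to a child JVM as -D flags.
    static String childJvmArgs(String[] hiveSysProps) {
      StringBuilder args = new StringBuilder();
      for (String key : hiveSysProps) {
        String value = System.getProperty(key);
        if (value != null && !value.isEmpty()) {
          args.append(" -D").append(key).append("=").append(value);
        }
      }
      return args.toString();
    }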

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
index f4fadbb..aeccfa5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
@@ -14,8 +14,16 @@
 package org.apache.hadoop.hive.ql.io.parquet;
 
 import java.io.IOException;
+import java.util.Map;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
+import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
+import org.apache.hadoop.hive.ql.plan.MapWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.JobConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -61,6 +69,9 @@ public class MapredParquetInputFormat extends FileInputFormat<NullWritable, Arra
       final org.apache.hadoop.mapred.JobConf job,
       final org.apache.hadoop.mapred.Reporter reporter
       ) throws IOException {
+
+    propagateParquetTimeZoneTableProperty((FileSplit) split, job);
+
     try {
       if (Utilities.getUseVectorizedInputFileFormat(job)) {
         if (LOG.isDebugEnabled()) {
@@ -78,4 +89,40 @@ public class MapredParquetInputFormat extends FileInputFormat<NullWritable, Arra
       throw new RuntimeException("Cannot create a RecordReaderWrapper", e);
     }
   }
+
+  /**
+   * Tries to find the table belonging to the file path of the split.
+   * If the table can be determined, the parquet timezone property will be propagated
+   * to the job configuration to be used during reading.
+   * If the table cannot be determined, nothing is done.
+   * @param split file split being read
+   * @param job configuration to set the timezone property on
+   */
+  private void propagateParquetTimeZoneTableProperty(FileSplit split, JobConf job) {
+    PartitionDesc part = null;
+    Path filePath = split.getPath();
+    try {
+      MapWork mapWork = Utilities.getMapWork(job);
+      if (mapWork != null) {
+        LOG.debug("Trying to find partition in MapWork for path " + filePath);
+        Map<Path, PartitionDesc> pathToPartitionInfo = mapWork.getPathToPartitionInfo();
+
+        part = HiveFileFormatUtils
+            .getPartitionDescFromPathRecursively(pathToPartitionInfo, filePath, null);
+        LOG.debug("Partition found " + part);
+      }
+    } catch (AssertionError ae) {
+      LOG.warn("Cannot get partition description from " + filePath
+          + " because " + ae.getMessage());
+      part = null;
+    } catch (Exception e) {
+      LOG.warn("Cannot get partition description from " + filePath
+          + " because " + e.getMessage());
+      part = null;
+    }
+
+    if (part != null && part.getTableDesc() != null) {
+      ParquetTableUtils.setParquetTimeZoneIfAbsent(job, part.getTableDesc().getProperties());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
index 2954601..be9fb10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
@@ -178,8 +178,7 @@ public class ParquetRecordReaderBase {
     } else {
       // TABLE_PARQUET_INT96_TIMEZONE is a table property used to detect what timezone conversion
       // to use when reading Parquet timestamps.
-      timeZoneID = configuration.get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY,
-          TimeZone.getDefault().getID());
+      timeZoneID = configuration.get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
       NanoTimeUtils.validateTimeZone(timeZoneID);
     }
 
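The removed TimeZone.getDefault() fallback changes the failure mode: a reader configuration that lacks parquet.mr.int96.write.zone now fails fast in validateTimeZone instead of silently converting with the local zone. A sketch, assuming conf is the reader's Configuration and requireWriteZone is a hypothetical helper name:

    static String requireWriteZone(org.apache.hadoop.conf.Configuration conf) {
      String timeZoneID = conf.get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
      NanoTimeUtils.validateTimeZone(timeZoneID); // null -> IllegalArgumentException
      return timeZoneID;
    }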

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java
index 6413c5a..a800991 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java
@@ -23,6 +23,7 @@ import java.util.Properties;
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.FieldNode;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -182,6 +183,14 @@ public class ParquetHiveSerDe extends AbstractSerDe {
   }
 
   /**
+   * @param table the table to check
+   * @return true if the table has the parquet serde defined
+   */
+  public static boolean isParquetTable(Table table) {
+    return table != null && ParquetHiveSerDe.class.getName().equals(table.getSerializationLib());
+  }
+
+  /**
    * Given a list of raw pruned paths separated by ',', return a list of merged pruned paths.
    * For instance, if the 'prunedPaths' is "s.a, s, s", this returns ["s"].
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetTableUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetTableUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetTableUtils.java
index b339cc4..9196bd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetTableUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetTableUtils.java
@@ -13,6 +13,11 @@
  */
 package org.apache.hadoop.hive.ql.io.parquet.serde;
 
+import org.apache.hadoop.mapred.JobConf;
+
+import java.util.Map;
+import java.util.TimeZone;
+
 public class ParquetTableUtils {
   // Parquet table properties
   public static final String PARQUET_INT96_WRITE_ZONE_PROPERTY = "parquet.mr.int96.write.zone";
@@ -20,4 +25,21 @@ public class ParquetTableUtils {
   // This is not a TimeZone we convert into and print out; rather it names the adjustment we
   // apply. More precisely, it is the lack of an adjustment in the case of UTC.
   public static final String PARQUET_INT96_NO_ADJUSTMENT_ZONE = "UTC";
+
+  /**
+   * Propagates the parquet timezone property from the table properties to the job
+   * configuration, or sets the default (the current JVM timezone) if it is absent.
+   * @param jc the job conf to set the parquet timezone property on
+   * @param tableProps the table properties which may contain the parquet timezone
+   */
+  public static void setParquetTimeZoneIfAbsent(JobConf jc, Map<?, ?> tableProps) {
+    if (tableProps != null && jc != null) {
+      if (tableProps.containsKey(PARQUET_INT96_WRITE_ZONE_PROPERTY)) {
+        jc.set(PARQUET_INT96_WRITE_ZONE_PROPERTY,
+            (String)tableProps.get(PARQUET_INT96_WRITE_ZONE_PROPERTY));
+      } else {
+        jc.set(PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
+      }
+    }
+  }
 }
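A usage sketch for the new helper, matching how FetchOperator, StatsNoJobTask and MapredParquetInputFormat call it in this commit; ParquetReadPrep and prepareForRead are hypothetical names used only for illustration:

    import java.util.Properties;

    import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
    import org.apache.hadoop.mapred.JobConf;

    public final class ParquetReadPrep {
      // After this call the JobConf always carries parquet.mr.int96.write.zone:
      // the table's own value when set, otherwise the reading JVM's default zone id.
      static void prepareForRead(JobConf jc, Properties tableProps) {
        ParquetTableUtils.setParquetTimeZoneIfAbsent(jc, tableProps);
      }
    }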

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
index dbd6fb3..30f6494 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
@@ -165,9 +165,12 @@ public class NanoTimeUtils {
    * @param timeZoneID
    */
   public static void validateTimeZone(String timeZoneID) {
+    if (timeZoneID == null) {
+      throw new IllegalArgumentException("Missing timezone id for parquet int96 conversion!");
+    }
     if (TimeZone.getTimeZone(timeZoneID).getID().equals("GMT")
         && !"GMT".equals(timeZoneID)) {
-      throw new IllegalStateException(
+      throw new IllegalArgumentException(
           "Unexpected timezone id found for parquet int96 conversion: " + timeZoneID);
     }
   }
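The GMT comparison above leans on a JDK guarantee: TimeZone.getTimeZone(String) returns the GMT zone for any id it does not recognize, so an id that maps to GMT without literally being "GMT" must be invalid. A small standalone demo:

    import java.util.TimeZone;

    public class ZoneCheckDemo {
      public static void main(String[] args) {
        System.out.println(TimeZone.getTimeZone("UCC").getID());             // GMT (unrecognized id)
        System.out.println(TimeZone.getTimeZone("Europe/Budapest").getID()); // Europe/Budapest
        System.out.println(TimeZone.getTimeZone("GMT").getID());             // GMT (legitimately)
      }
    }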

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/AbstractTestParquetDirect.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/AbstractTestParquetDirect.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/AbstractTestParquetDirect.java
index c81499a..e53c951 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/AbstractTestParquetDirect.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/AbstractTestParquetDirect.java
@@ -27,10 +27,13 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.TimeZone;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -141,10 +144,12 @@ public abstract class AbstractTestParquetDirect {
   public static List<ArrayWritable> read(Path parquetFile) throws IOException {
     List<ArrayWritable> records = new ArrayList<ArrayWritable>();
 
+    JobConf job = new JobConf();
+    job.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     RecordReader<NullWritable, ArrayWritable> reader = new MapredParquetInputFormat().
         getRecordReader(new FileSplit(
                 parquetFile, 0, fileLength(parquetFile), (String[]) null),
-            new JobConf(), null);
+            job, null);
 
     NullWritable alwaysNull = reader.createKey();
     ArrayWritable record = reader.createValue();

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRowGroupFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRowGroupFilter.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRowGroupFilter.java
index bf363f3..b712ee9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRowGroupFilter.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRowGroupFilter.java
@@ -21,11 +21,13 @@ package org.apache.hadoop.hive.ql.io.parquet;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.TimeZone;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ArrayWritableObjectInspector;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -103,6 +105,7 @@ public class TestParquetRowGroupFilter extends AbstractTestParquetDirect {
     ExprNodeGenericFuncDesc genericFuncDesc = new ExprNodeGenericFuncDesc(inspector, udf, children);
     String searchArgumentStr = SerializationUtilities.serializeExpression(genericFuncDesc);
     conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, searchArgumentStr);
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
 
     ParquetRecordReaderWrapper recordReader = (ParquetRecordReaderWrapper)
         new MapredParquetInputFormat().getRecordReader(

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
index f2d79cf..4217935 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.parquet.read.DataWritableReadSupport;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ArrayWritableObjectInspector;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
 import org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -57,6 +58,7 @@ import java.io.IOException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.util.List;
+import java.util.TimeZone;
 
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.TestCase.assertFalse;
@@ -321,6 +323,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES,"int");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required int32 int32_field;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -350,6 +353,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "bigint");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required int64 int64_field;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -379,6 +383,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "double");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required double double_field;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -409,6 +414,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "float");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required float float_field;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -439,6 +445,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "boolean");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required boolean boolean_field;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -468,6 +475,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "string");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message test { required binary binary_field_some_null;}", conf);
     VectorizedRowBatch previous = reader.createValue();
@@ -506,6 +514,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "struct<a:int,b:double>");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     String schema = "message hive_schema {\n"
       + "group struct_field {\n"
       + "  optional int32 a;\n"
@@ -544,6 +553,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "struct<nsf:struct<c:int,d:int>,e:double>");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     String schema = "message hive_schema {\n"
       + "group nested_struct_field {\n"
       + "  optional group nsf {\n"
@@ -587,6 +597,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "struct<nsf:struct<c:int>>");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     String schema = "message hive_schema {\n"
       + "group nested_struct_field {\n"
       + "  optional group nsf {\n"
@@ -624,6 +635,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "struct<f:int,g:double>");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     String schema = "message hive_schema {\n"
       + "group struct_field_some_null {\n"
       + "  optional int32 f;\n"
@@ -669,6 +681,7 @@ public class VectorizedColumnReaderTestBase {
     conf.set(IOConstants.COLUMNS_TYPES, "decimal(5,2)");
     conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
     conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    conf.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, TimeZone.getDefault().getID());
     VectorizedParquetRecordReader reader =
       createParquetReader("message hive_schema { required value (DECIMAL(5,2));}", conf);
     VectorizedRowBatch previous = reader.createValue();

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/timestamp/TestNanoTimeUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/timestamp/TestNanoTimeUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/timestamp/TestNanoTimeUtils.java
index 1e10dbf..5a66cd1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/timestamp/TestNanoTimeUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/timestamp/TestNanoTimeUtils.java
@@ -239,8 +239,18 @@ public class TestNanoTimeUtils {
     NanoTimeUtils.validateTimeZone("Europe/Budapest");
   }
 
-  @Test(expected = IllegalStateException.class)
+  @Test(expected = IllegalArgumentException.class)
   public void testTimeZoneValidationWithIncorrectZoneId() {
     NanoTimeUtils.validateTimeZone("UCC");
   }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testTimeZoneValidationWithMissingZoneId() {
+    NanoTimeUtils.validateTimeZone(null);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testTimeZoneValidationWithEmptyZoneId() {
+    NanoTimeUtils.validateTimeZone("");
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/queries/clientnegative/parquet_int96_alter_invalid_timezone.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/parquet_int96_alter_invalid_timezone.q b/ql/src/test/queries/clientnegative/parquet_int96_alter_invalid_timezone.q
new file mode 100644
index 0000000..2de92ad
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/parquet_int96_alter_invalid_timezone.q
@@ -0,0 +1,5 @@
+-- alter table to invalid table property
+create table timestamps (ts timestamp) stored as parquet;
+alter table timestamps set tblproperties ('parquet.mr.int96.write.zone'='Invalid');
+
+drop table timestamps;

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/queries/clientnegative/parquet_int96_create_invalid_timezone.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/parquet_int96_create_invalid_timezone.q b/ql/src/test/queries/clientnegative/parquet_int96_create_invalid_timezone.q
new file mode 100644
index 0000000..ffba084
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/parquet_int96_create_invalid_timezone.q
@@ -0,0 +1,3 @@
+-- create table with invalid table property
+create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='Invalid');
+

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/queries/clientpositive/parquet_int96_timestamp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_int96_timestamp.q b/ql/src/test/queries/clientpositive/parquet_int96_timestamp.q
index 6eadd1b..d0640fa 100644
--- a/ql/src/test/queries/clientpositive/parquet_int96_timestamp.q
+++ b/ql/src/test/queries/clientpositive/parquet_int96_timestamp.q
@@ -37,10 +37,31 @@ drop table timestamps;
 -- read/write timestamps with timezone specified in table properties
 create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST');
 insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1;
+insert into table timestamps values('2017-01-01 01:01:01');
+-- parquet timezone flag set in the fetch operator
 select * from timestamps;
+-- parquet timezone flag set in MapredParquetInputFormat
+select * from timestamps order by ts;
+select * from timestamps where ts = cast('2016-01-01 01:01:01' as timestamp);
+-- using udfs
+select year(ts), day(ts), hour(ts), ts from timestamps;
 describe formatted timestamps;
 drop table timestamps;
 
+-- read timestamps with different timezones specified in two table properties
+create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST');
+insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1;
+insert into table timestamps values('2017-01-01 01:01:01');
+create table timestamps2 (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+2');
+insert into table timestamps2 select cast('2016-01-01 01:01:01' as timestamp) limit 1;
+insert into table timestamps2 values('2017-01-01 01:01:01');
+-- parquet timezone flag set in the MapredLocalTask
+select * from timestamps a inner join timestamps2 b on a.ts = b.ts;
+describe formatted timestamps;
+drop table timestamps;
+describe formatted timestamps2;
+drop table timestamps2;
+
 -- read timestamps written by Impala
 create table timestamps (ts timestamp) stored as parquet;
 load data local inpath '../../data/files/impala_int96_timestamp.parq' overwrite into table timestamps;

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/results/clientnegative/parquet_int96_alter_invalid_timezone.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/parquet_int96_alter_invalid_timezone.q.out b/ql/src/test/results/clientnegative/parquet_int96_alter_invalid_timezone.q.out
new file mode 100644
index 0000000..97d61a2
--- /dev/null
+++ b/ql/src/test/results/clientnegative/parquet_int96_alter_invalid_timezone.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: alter table timestamps set tblproperties ('parquet.mr.int96.write.zone'='Invalid')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unexpected timezone id found for parquet int96 conversion: Invalid

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/results/clientnegative/parquet_int96_create_invalid_timezone.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/parquet_int96_create_invalid_timezone.q.out b/ql/src/test/results/clientnegative/parquet_int96_create_invalid_timezone.q.out
new file mode 100644
index 0000000..d619ce6
--- /dev/null
+++ b/ql/src/test/results/clientnegative/parquet_int96_create_invalid_timezone.q.out
@@ -0,0 +1,5 @@
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='Invalid')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unexpected timezone id found for parquet int96 conversion: Invalid

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/results/clientpositive/parquet_int96_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_int96_timestamp.q.out b/ql/src/test/results/clientpositive/parquet_int96_timestamp.q.out
index b9a3664..83f1a69 100644
--- a/ql/src/test/results/clientpositive/parquet_int96_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/parquet_int96_timestamp.q.out
@@ -305,6 +305,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@timestamps
 POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 PREHOOK: query: select * from timestamps
 PREHOOK: type: QUERY
 PREHOOK: Input: default@timestamps
@@ -314,6 +321,36 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@timestamps
 #### A masked pattern was here ####
 2016-01-01 01:01:01
+2017-01-01 01:01:01
+PREHOOK: query: select * from timestamps order by ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps order by ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+2017-01-01 01:01:01
+PREHOOK: query: select * from timestamps where ts = cast('2016-01-01 01:01:01' as timestamp)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps where ts = cast('2016-01-01 01:01:01' as timestamp)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: select year(ts), day(ts), hour(ts), ts from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select year(ts), day(ts), hour(ts), ts from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016	1	1	2016-01-01 01:01:01
+2017	1	1	2017-01-01 01:01:01
 PREHOOK: query: describe formatted timestamps
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@timestamps
@@ -332,11 +369,114 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	1                   
-	numRows             	1                   
+	numFiles            	2                   
+	numRows             	2                   
 	parquet.mr.int96.write.zone	PST                 
-	rawDataSize         	1                   
-	totalSize           	272                 
+	rawDataSize         	2                   
+	totalSize           	544                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: create table timestamps2 (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: create table timestamps2 (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps2
+PREHOOK: query: insert into table timestamps2 select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: insert into table timestamps2 select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps2
+POSTHOOK: Lineage: timestamps2.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps2 values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: insert into table timestamps2 values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps2
+POSTHOOK: Lineage: timestamps2.ts EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from timestamps a inner join timestamps2 b on a.ts = b.ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+PREHOOK: Input: default@timestamps2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps a inner join timestamps2 b on a.ts = b.ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Input: default@timestamps2
+#### A masked pattern was here ####
+2016-01-01 01:01:01	2016-01-01 01:01:01
+2017-01-01 01:01:01	2017-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	2                   
+	parquet.mr.int96.write.zone	PST                 
+	rawDataSize         	2                   
+	totalSize           	544                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -357,6 +497,49 @@ POSTHOOK: query: drop table timestamps
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@timestamps
 POSTHOOK: Output: default@timestamps
+PREHOOK: query: describe formatted timestamps2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps2
+POSTHOOK: query: describe formatted timestamps2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps2
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	2                   
+	parquet.mr.int96.write.zone	GMT+2               
+	rawDataSize         	2                   
+	totalSize           	544                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps2
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: drop table timestamps2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps2
+POSTHOOK: Output: default@timestamps2
 PREHOOK: query: create table timestamps (ts timestamp) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/78e29fc7/ql/src/test/results/clientpositive/spark/parquet_int96_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parquet_int96_timestamp.q.out b/ql/src/test/results/clientpositive/spark/parquet_int96_timestamp.q.out
new file mode 100644
index 0000000..83f1a69
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/parquet_int96_timestamp.q.out
@@ -0,0 +1,718 @@
+PREHOOK: query: create table dummy (id int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dummy
+POSTHOOK: query: create table dummy (id int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dummy
+PREHOOK: query: insert into table dummy values (1)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@dummy
+POSTHOOK: query: insert into table dummy values (1)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@dummy
+POSTHOOK: Lineage: dummy.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	parquet.mr.int96.write.zone	UTC                 
+	rawDataSize         	1                   
+	totalSize           	272                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	parquet.mr.int96.write.zone	PST                 
+	rawDataSize         	1                   
+	totalSize           	272                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	1                   
+	totalSize           	272                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='CST')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='CST')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	parquet.mr.int96.write.zone	CST                 
+	rawDataSize         	1                   
+	totalSize           	272                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+2017-01-01 01:01:01
+PREHOOK: query: select * from timestamps order by ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps order by ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+2017-01-01 01:01:01
+PREHOOK: query: select * from timestamps where ts = cast('2016-01-01 01:01:01' as timestamp)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps where ts = cast('2016-01-01 01:01:01' as timestamp)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: select year(ts), day(ts), hour(ts), ts from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select year(ts), day(ts), hour(ts), ts from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016	1	1	2016-01-01 01:01:01
+2017	1	1	2017-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	2                   
+	parquet.mr.int96.write.zone	PST                 
+	rawDataSize         	2                   
+	totalSize           	544                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='PST')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: insert into table timestamps values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps
+POSTHOOK: Lineage: timestamps.ts EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: create table timestamps2 (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: create table timestamps2 (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps2
+PREHOOK: query: insert into table timestamps2 select cast('2016-01-01 01:01:01' as timestamp) limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: insert into table timestamps2 select cast('2016-01-01 01:01:01' as timestamp) limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@timestamps2
+POSTHOOK: Lineage: timestamps2.ts EXPRESSION []
+PREHOOK: query: insert into table timestamps2 values('2017-01-01 01:01:01')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: insert into table timestamps2 values('2017-01-01 01:01:01')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@timestamps2
+POSTHOOK: Lineage: timestamps2.ts EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from timestamps a inner join timestamps2 b on a.ts = b.ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+PREHOOK: Input: default@timestamps2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps a inner join timestamps2 b on a.ts = b.ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Input: default@timestamps2
+#### A masked pattern was here ####
+2016-01-01 01:01:01	2016-01-01 01:01:01
+2017-01-01 01:01:01	2017-01-01 01:01:01
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	2                   
+	parquet.mr.int96.write.zone	PST                 
+	rawDataSize         	2                   
+	totalSize           	544                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: describe formatted timestamps2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps2
+POSTHOOK: query: describe formatted timestamps2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps2
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	2                   
+	parquet.mr.int96.write.zone	GMT+2               
+	rawDataSize         	2                   
+	totalSize           	544                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps2
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: drop table timestamps2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps2
+POSTHOOK: Output: default@timestamps2
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: load data local inpath '../../data/files/impala_int96_timestamp.parq' overwrite into table timestamps
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: load data local inpath '../../data/files/impala_int96_timestamp.parq' overwrite into table timestamps
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+10')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+10')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: load data local inpath '../../data/files/impala_int96_timestamp.parq' overwrite into table timestamps
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: load data local inpath '../../data/files/impala_int96_timestamp.parq' overwrite into table timestamps
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: select * from timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@timestamps
+#### A masked pattern was here ####
+2016-01-01 01:01:01
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+10')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: create table timestamps (ts timestamp) stored as parquet tblproperties('parquet.mr.int96.write.zone'='GMT+10')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: create table timestamps2 like timestamps
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: create table timestamps2 like timestamps
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@timestamps2
+PREHOOK: query: describe formatted timestamps
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps
+POSTHOOK: query: describe formatted timestamps
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	parquet.mr.int96.write.zone	GMT+10              
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted timestamps2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@timestamps2
+POSTHOOK: query: describe formatted timestamps2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@timestamps2
+# col_name            	data_type           	comment             
+	 	 
+ts                  	timestamp           	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	parquet.mr.int96.write.zone	GMT+10              
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table timestamps
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps
+PREHOOK: Output: default@timestamps
+POSTHOOK: query: drop table timestamps
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps
+POSTHOOK: Output: default@timestamps
+PREHOOK: query: drop table timestamps2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@timestamps2
+PREHOOK: Output: default@timestamps2
+POSTHOOK: query: drop table timestamps2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@timestamps2
+POSTHOOK: Output: default@timestamps2
+PREHOOK: query: drop table if exists dummy
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@dummy
+PREHOOK: Output: default@dummy
+POSTHOOK: query: drop table if exists dummy
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@dummy
+POSTHOOK: Output: default@dummy