Posted to commits@hive.apache.org by ek...@apache.org on 2018/02/27 22:06:27 UTC

hive git commit: HIVE-18659 - add acid version marker to acid files/directories (Eugene Koifman, reviewed by Prasanth Jayachandran)

Repository: hive
Updated Branches:
  refs/heads/master 5b2ab390f -> 388945f14


HIVE-18659 - add acid version marker to acid files/directories (Eugene Koifman, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/388945f1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/388945f1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/388945f1

Branch: refs/heads/master
Commit: 388945f141138fe1b7ed1597c498906327283dbc
Parents: 5b2ab39
Author: Eugene Koifman <ek...@apache.org>
Authored: Tue Feb 27 14:06:06 2018 -0800
Committer: Eugene Koifman <ek...@apache.org>
Committed: Tue Feb 27 14:06:06 2018 -0800

----------------------------------------------------------------------
 .../hive/hcatalog/streaming/TestStreaming.java  |   9 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  11 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 111 ++++++++++++++++---
 .../hadoop/hive/ql/io/orc/OrcOutputFormat.java  |   9 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |  57 ++++------
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   9 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |   7 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |  60 ++++++++++
 .../hive/ql/io/orc/TestInputOutputFormat.java   |   4 +-
 .../hive/ql/txn/compactor/TestWorker.java       |  28 ++---
 .../results/clientpositive/acid_nullscan.q.out  |   8 +-
 .../clientpositive/acid_table_stats.q.out       |  14 +--
 .../clientpositive/autoColumnStats_4.q.out      |   4 +-
 .../llap/acid_bucket_pruning.q.out              |   4 +-
 .../clientpositive/llap/orc_llap_counters.q.out |  84 +++++++-------
 .../llap/orc_llap_counters1.q.out               |   4 +-
 .../clientpositive/llap/orc_ppd_basic.q.out     | 102 ++++++++---------
 .../llap/orc_ppd_schema_evol_3a.q.out           | 100 ++++++++---------
 .../test/results/clientpositive/row__id.q.out   |  18 +--
 19 files changed, 386 insertions(+), 257 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index b042049..805fddb 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.streaming;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileFilter;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -2057,7 +2058,13 @@ public class TestStreaming {
       if(!deltaDir.getName().startsWith("delta")) {
         continue;
       }
-      File[] bucketFiles = deltaDir.listFiles();
+      File[] bucketFiles = deltaDir.listFiles(new FileFilter() {
+        @Override
+        public boolean accept(File pathname) {
+          String name = pathname.getName();
+          return !name.startsWith("_") && !name.startsWith(".");
+        }
+      });
       for (File bucketFile : bucketFiles) {
         if(bucketFile.toString().endsWith("length")) {
           continue;

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index ff62863..c084fa0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Utilities.MissingBucketsContext;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.AcidUtils.Operation;
 import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
@@ -264,15 +263,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         }
         FileUtils.mkdir(fs, finalPaths[idx].getParent(), hconf);
       }
-      // If we're updating or deleting there may be no file to close.  This can happen
-      // because the where clause strained out all of the records for a given bucket.  So
-      // before attempting the rename below, check if our file exists.  If it doesn't,
-      // then skip the rename.  If it does try it.  We could just blindly try the rename
-      // and avoid the extra stat, but that would mask other errors.
-      Operation acidOp = conf.getWriteType();
-      boolean needToRename = outPaths[idx] != null && ((acidOp != Operation.UPDATE
-          && acidOp != Operation.DELETE) || fs.exists(outPaths[idx]));
-      if (needToRename && outPaths[idx] != null) {
+      if(outPaths[idx] != null && fs.exists(outPaths[idx])) {
         if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
           Utilities.FILE_OP_LOGGER.trace("committing " + outPaths[idx] + " to "
               + finalPaths[idx] + " (" + isMmTable + ")");

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 70fcd2c..8dc1e8a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -18,17 +18,7 @@
 
 package org.apache.hadoop.hive.ql.io;
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Pattern;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,14 +30,15 @@ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.Reader;
+import org.apache.hadoop.hive.ql.io.orc.Writer;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@ -61,7 +52,17 @@ import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.regex.Pattern;
 
 import static org.apache.hadoop.hive.ql.exec.Utilities.COPY_KEYWORD;
 
@@ -1659,4 +1660,88 @@ public class AcidUtils {
       }
     }
   }
+
+  /**
+   * Logic related to versioning acid data format.  An {@code ACID_FORMAT} file is written to each
+   * base/delta/delete_delta dir written by a full acid write or compaction.  This is the primary
+   * mechanism for versioning acid data.
+   *
+   * Each individual ORC file written stores the current version as a user property in ORC footer.
+   * All data files produced by an acid write should carry this property (starting with Hive 3.0),
+   * including those written by the compactor.  This is mainly a sanity check in case files are
+   * moved out of their original directories.
+   */
+  public static final class OrcAcidVersion {
+    private static final String ACID_VERSION_KEY = "hive.acid.version";
+    private static final String ACID_FORMAT = "_orc_acid_version";
+    public static final int ORC_ACID_VERSION_DEFAULT = 0;
+    /**
+     * 2 is the version of Acid released in Hive 3.0.
+     */
+    public static final int ORC_ACID_VERSION = 2;
+    /**
+     * Include the current acid version in the file footer.
+     * @param writer - writer for the data file
+     */
+    public static void setAcidVersionInDataFile(Writer writer) {
+      //so that we know which version wrote the file
+      ByteBuffer bf = ByteBuffer.allocate(4).putInt(ORC_ACID_VERSION);
+      bf.rewind(); //after putInt() the buffer's position equals its limit, so without rewind() nothing would be written
+      writer.addUserMetadata(ACID_VERSION_KEY, bf);
+    }
+    /**
+     * This is smart enough to handle streaming ingest where there could be a
+     * {@link OrcAcidUtils#DELTA_SIDE_FILE_SUFFIX} side file.
+     * @param dataFile - ORC acid data file
+     * @return version property from file if there,
+     *          {@link #ORC_ACID_VERSION_DEFAULT} otherwise
+     */
+    @VisibleForTesting
+    public static int getAcidVersionFromDataFile(Path dataFile, FileSystem fs) throws IOException {
+      FileStatus fileStatus = fs.getFileStatus(dataFile);
+      Reader orcReader = OrcFile.createReader(dataFile,
+          OrcFile.readerOptions(fs.getConf())
+              .filesystem(fs)
+              //make sure to check for side file in case streaming ingest died
+              .maxLength(getLogicalLength(fs, fileStatus)));
+      if(orcReader.hasMetadataValue(ACID_VERSION_KEY)) {
+        return orcReader.getMetadataValue(ACID_VERSION_KEY).getInt();
+      }
+      return ORC_ACID_VERSION_DEFAULT;
+    }
+    /**
+     * This creates a version file in {@code deltaOrBaseDir}
+     * @param deltaOrBaseDir - where to create the version file
+     */
+    public static void writeVersionFile(Path deltaOrBaseDir, FileSystem fs)  throws IOException {
+      Path formatFile = getVersionFilePath(deltaOrBaseDir);
+      if(!fs.exists(formatFile)) {
+        try (FSDataOutputStream strm = fs.create(formatFile, false)) {
+          strm.writeInt(ORC_ACID_VERSION);
+        } catch (IOException ioe) {
+          LOG.error("Failed to create " + formatFile + " due to: " + ioe.getMessage(), ioe);
+          throw ioe;
+        }
+      }
+    }
+    public static Path getVersionFilePath(Path deltaOrBase) {
+      return new Path(deltaOrBase, ACID_FORMAT);
+    }
+    @VisibleForTesting
+    public static int getAcidVersionFromMetaFile(Path deltaOrBaseDir, FileSystem fs)
+        throws IOException {
+      Path formatFile = getVersionFilePath(deltaOrBaseDir);
+      if(!fs.exists(formatFile)) {
+        LOG.debug(formatFile + " not found, returning default: " + ORC_ACID_VERSION_DEFAULT);
+        return ORC_ACID_VERSION_DEFAULT;
+      }
+      try (FSDataInputStream inputStream = fs.open(formatFile)) {
+        return inputStream.readInt();
+      }
+      catch(IOException ex) {
+        LOG.error(formatFile + " is unreadable due to: " + ex.getMessage(), ex);
+        throw ex;
+      }
+    }
+  }
 }
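
For illustration, a minimal sketch (not part of this patch) of how a caller could consume the
markers added above: prefer the _orc_acid_version meta file in the delta/base dir and fall back to
the footer property of a bucket file when the meta file is absent. The class and method names below
(AcidVersionProbe, resolveAcidVersion) are made up for the example.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

import java.io.IOException;

public class AcidVersionProbe {
  /** Resolve the acid version for a delta/base dir, falling back to the ORC footer property. */
  static int resolveAcidVersion(Path deltaOrBaseDir, Path bucketFile, FileSystem fs)
      throws IOException {
    int fromMeta = AcidUtils.OrcAcidVersion.getAcidVersionFromMetaFile(deltaOrBaseDir, fs);
    if (fromMeta != AcidUtils.OrcAcidVersion.ORC_ACID_VERSION_DEFAULT) {
      return fromMeta;
    }
    //meta file missing (or pre-3.0 data): read the version stored in the ORC footer, if any
    return AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(bucketFile, fs);
  }
}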

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
index 57e005d..e69d1a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
@@ -40,19 +40,11 @@ import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -300,6 +292,7 @@ public class OrcOutputFormat extends FileOutputFormat<NullWritable, OrcSerdeRow>
     opts.inspector(options.getInspector())
         .callback(watcher);
     final Writer writer = OrcFile.createWriter(filename, opts);
+    AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(writer);
     return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() {
       @Override
       public void write(Writable w) throws IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 2e4db22..970af0e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
-import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
@@ -63,9 +62,6 @@ public class OrcRecordUpdater implements RecordUpdater {
   private static final Logger LOG = LoggerFactory.getLogger(OrcRecordUpdater.class);
 
   static final String ACID_KEY_INDEX_NAME = "hive.acid.key.index";
-  private static final String ACID_FORMAT = "_orc_acid_version";
-  private static final int ORC_ACID_VERSION = 0;
-
 
   final static int INSERT_OPERATION = 0;
   final static int UPDATE_OPERATION = 1;
@@ -86,6 +82,7 @@ public class OrcRecordUpdater implements RecordUpdater {
   final static long DELTA_STRIPE_SIZE = 16 * 1024 * 1024;
 
   private static final Charset UTF8 = Charset.forName("UTF-8");
+  private static final CharsetDecoder utf8Decoder = UTF8.newDecoder();
 
   private final AcidOutputFormat.Options options;
   private final AcidUtils.AcidOperationalProperties acidOperationalProperties;
@@ -195,9 +192,9 @@ public class OrcRecordUpdater implements RecordUpdater {
     return new OrcStruct.OrcStructInspector(fields);
   }
   /**
-   * @param path - partition root
+   * @param partitionRoot - partition root (or table root if not partitioned)
    */
-  OrcRecordUpdater(Path path,
+  OrcRecordUpdater(Path partitionRoot,
                    AcidOutputFormat.Options options) throws IOException {
     this.options = options;
     // Initialize acidOperationalProperties based on table properties, and
@@ -225,27 +222,16 @@ public class OrcRecordUpdater implements RecordUpdater {
       }
     }
     this.bucket.set(bucketCodec.encode(options));
-    this.path = AcidUtils.createFilename(path, options);
+    this.path = AcidUtils.createFilename(partitionRoot, options);
     this.deleteEventWriter = null;
     this.deleteEventPath = null;
     FileSystem fs = options.getFilesystem();
     if (fs == null) {
-      fs = path.getFileSystem(options.getConfiguration());
+      fs = partitionRoot.getFileSystem(options.getConfiguration());
     }
     this.fs = fs;
-    Path formatFile = new Path(path, ACID_FORMAT);
-    if(!fs.exists(formatFile)) {
-      try (FSDataOutputStream strm = fs.create(formatFile, false)) {
-        strm.writeInt(ORC_ACID_VERSION);
-      } catch (IOException ioe) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to create " + path + "/" + ACID_FORMAT + " with " +
-            ioe);
-        }
-      }
-    }
     if (options.getMinimumWriteId() != options.getMaximumWriteId()
-        && !options.isWritingBase()){
+        && !options.isWritingBase()) {
       //throw if file already exists as that should never happen
       flushLengths = fs.create(OrcAcidUtils.getSideFile(this.path), false, 8,
           options.getReporter());
@@ -282,7 +268,7 @@ public class OrcRecordUpdater implements RecordUpdater {
         // This writes to a file in directory which starts with "delete_delta_..."
         // The actual initialization of a writer only happens if any delete events are written
         //to avoid empty files.
-        this.deleteEventPath = AcidUtils.createFilename(path, deleteOptions);
+        this.deleteEventPath = AcidUtils.createFilename(partitionRoot, deleteOptions);
         /**
          * HIVE-14514 is not done so we can't clone writerOptions().  So here we create a new
          * options object to make sure insert and delete writers don't share them (like the
@@ -319,7 +305,6 @@ public class OrcRecordUpdater implements RecordUpdater {
     item.setFieldValue(BUCKET, bucket);
     item.setFieldValue(ROW_ID, rowId);
   }
-
   @Override
   public String toString() {
     return getClass().getName() + "[" + path +"]";
@@ -380,9 +365,7 @@ public class OrcRecordUpdater implements RecordUpdater {
     item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation));
     item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row));
     indexBuilder.addKey(operation, originalWriteId, bucket.get(), rowId);
-    if (writer == null) {
-      writer = OrcFile.createWriter(path, writerOptions);
-    }
+    initWriter();
     writer.addRow(item);
     restoreBucket(currentBucket, operation);
   }
@@ -416,7 +399,9 @@ public class OrcRecordUpdater implements RecordUpdater {
         // Initialize an indexBuilder for deleteEvents. (HIVE-17284)
         deleteEventIndexBuilder = new KeyIndexBuilder("delete");
         this.deleteEventWriter = OrcFile.createWriter(deleteEventPath,
-                                                      deleteWriterOptions.callback(deleteEventIndexBuilder));
+            deleteWriterOptions.callback(deleteEventIndexBuilder));
+        AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(deleteEventWriter);
+        AcidUtils.OrcAcidVersion.writeVersionFile(this.deleteEventPath.getParent(), fs);
       }
 
       // A delete/update generates a delete event for the original row.
@@ -482,9 +467,7 @@ public class OrcRecordUpdater implements RecordUpdater {
       throw new IllegalStateException("Attempting to flush a RecordUpdater on "
          + path + " with a single transaction.");
     }
-    if (writer == null) {
-      writer = OrcFile.createWriter(path, writerOptions);
-    }
+    initWriter();
     long len = writer.writeIntermediateFooter();
     flushLengths.writeLong(len);
     OrcInputFormat.SHIMS.hflush(flushLengths);
@@ -507,10 +490,8 @@ public class OrcRecordUpdater implements RecordUpdater {
           writer.close(); // normal close, when there are inserts.
         }
       } else {
-        if (writer == null) {
-          //so that we create empty bucket files when needed (but see HIVE-17138)
-          writer = OrcFile.createWriter(path, writerOptions);
-        }
+        //so that we create empty bucket files when needed (but see HIVE-17138)
+        initWriter();
         writer.close(); // normal close.
       }
       if (deleteEventWriter != null) {
@@ -531,6 +512,13 @@ public class OrcRecordUpdater implements RecordUpdater {
     deleteEventWriter = null;
     writerClosed = true;
   }
+  private void initWriter() throws IOException {
+    if (writer == null) {
+      writer = OrcFile.createWriter(path, writerOptions);
+      AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(writer);
+      AcidUtils.OrcAcidVersion.writeVersionFile(path.getParent(), fs);
+    }
+  }
 
   @Override
   public SerDeStats getStats() {
@@ -541,9 +529,6 @@ public class OrcRecordUpdater implements RecordUpdater {
     return stats;
   }
 
-  private static final Charset utf8 = Charset.forName("UTF-8");
-  private static final CharsetDecoder utf8Decoder = utf8.newDecoder();
-
   static RecordIdentifier[] parseKeyIndex(Reader reader) {
     String[] stripes;
     try {

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 8b0af3e..baa9070 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -3648,13 +3648,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
           └── -ext-10000
             ├── HIVE_UNION_SUBDIR_1
             │   └── 000000_0
-            │       ├── _orc_acid_version
             │       └── delta_0000019_0000019_0001
+            │           ├── _orc_acid_version
             │           └── bucket_00000
             ├── HIVE_UNION_SUBDIR_2
             │   └── 000000_0
-            │       ├── _orc_acid_version
             │       └── delta_0000019_0000019_0002
+            │           ├── _orc_acid_version
             │           └── bucket_00000
            The assumption is that we either have all data in subdirs or root of srcPath
            but not both.
@@ -3713,7 +3713,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
       try {
         if (!createdDeltaDirs.contains(deltaDest)) {
           try {
-            fs.mkdirs(deltaDest);
+            if(fs.mkdirs(deltaDest)) {
+              fs.rename(AcidUtils.OrcAcidVersion.getVersionFilePath(deltaStat.getPath()),
+                  AcidUtils.OrcAcidVersion.getVersionFilePath(deltaDest));
+            }
             createdDeltaDirs.add(deltaDest);
           } catch (IOException swallowIt) {
             // Don't worry about this, as it likely just means it's already been created.

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 5c13781..31da66a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -935,6 +934,7 @@ public class CompactorMR {
             " not found.  Assuming 0 splits.  Creating " + newDeltaDir);
         fs.mkdirs(newDeltaDir);
         createCompactorMarker(conf, newDeltaDir, fs);
+        AcidUtils.OrcAcidVersion.writeVersionFile(newDeltaDir, fs);
         return;
       }
       FileStatus[] contents = fs.listStatus(tmpLocation);//expect 1 base or delta dir in this list
@@ -943,7 +943,12 @@ public class CompactorMR {
       for (FileStatus fileStatus : contents) {
         //newPath is the base/delta dir
         Path newPath = new Path(finalLocation, fileStatus.getPath().getName());
+        /*rename(A, B) has "interesting" behavior if A and B are directories. If  B doesn't exist,
+        * it does the expected operation and everything that was in A is now in B.  If B exists,
+        * it will make A a child of B...  thus make sure the rename() is done before creating the
+        * meta files which will create base_x/ (i.e. B)...*/
         fs.rename(fileStatus.getPath(), newPath);
+        AcidUtils.OrcAcidVersion.writeVersionFile(newPath, fs);
         createCompactorMarker(conf, newPath, fs);
       }
       fs.delete(tmpLocation, true);
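
A minimal sketch (not part of this patch) of the FileSystem.rename() ordering the comment above
guards against, assuming a compactor temp dir and a final table location; the paths are
hypothetical. If the destination directory already existed, the source would be moved underneath it
instead of replacing it, so the marker/version files are written only after the rename.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

public class RenameOrderingSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmpBase = new Path("/tmp/compactor/base_0000010");    //compactor output (A)
    Path finalBase = new Path("/warehouse/t/base_0000010");    //final location (B)

    //finalBase does not exist yet, so the whole directory is moved as expected
    fs.rename(tmpBase, finalBase);

    //only now create the per-directory markers; creating them first would create finalBase
    //itself and the rename would nest the output as .../base_0000010/base_0000010
    AcidUtils.OrcAcidVersion.writeVersionFile(finalBase, fs);
  }
}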

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 470856b..b90f5b1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -17,16 +17,20 @@
  */
 package org.apache.hadoop.hive.ql;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
 import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
@@ -842,4 +846,60 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     int[][] expected = {{0, -1},{0, -1}, {1, -1}, {1, -1}, {2, -1}, {2, -1}, {3, -1}, {3, -1}};
     Assert.assertEquals(stringifyValues(expected), r);
   }
+  @Test
+  public void testVersioning() throws Exception {
+    hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true");
+    runStatementOnDriver("drop table if exists T");
+    runStatementOnDriver("create table T (a int, b int) stored as orc");
+    int[][] data = {{1, 2}};
+    //create 1 delta file bucket_00000
+    runStatementOnDriver("insert into T" + makeValuesClause(data));
+
+    //delete the bucket files so now we have empty delta dirs
+    List<String> rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
+    FileSystem fs = FileSystem.get(hiveConf);
+    Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.DELTA_PREFIX));
+    Path  filePath = new Path(rs.get(0));
+    int version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs);
+    //check it has expected version marker
+    Assert.assertEquals("Unexpected version marker in " + filePath,
+        AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version);
+
+    //check that delta dir has a version file with expected value
+    filePath = filePath.getParent();
+    Assert.assertTrue(filePath.getName().startsWith(AcidUtils.DELTA_PREFIX));
+    int versionFromMetaFile = AcidUtils.OrcAcidVersion
+                                  .getAcidVersionFromMetaFile(filePath, fs);
+    Assert.assertEquals("Unexpected version marker in " + filePath,
+        AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile);
+
+    runStatementOnDriver("insert into T" + makeValuesClause(data));
+    runStatementOnDriver("alter table T compact 'major'");
+    TestTxnCommands2.runWorker(hiveConf);
+
+    //check status of compaction job
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
+    Assert.assertEquals("Unexpected 0 compaction state",
+        TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
+    Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
+
+    rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
+    Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.BASE_PREFIX));
+
+    filePath = new Path(rs.get(0));
+    version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs);
+    //check that files produced by compaction still have the version marker
+    Assert.assertEquals("Unexpected version marker in " + filePath,
+        AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version);
+
+    //check that compacted base dir has a version file with expected value
+    filePath = filePath.getParent();
+    Assert.assertTrue(filePath.getName().startsWith(AcidUtils.BASE_PREFIX));
+    versionFromMetaFile = AcidUtils.OrcAcidVersion.getAcidVersionFromMetaFile(
+        filePath, fs);
+    Assert.assertEquals("Unexpected version marker in " + filePath,
+        AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index ff76002..0ac29fa 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -2473,14 +2473,14 @@ public class TestInputOutputFormat {
     assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000",
         split.getPath().toString());
     assertEquals(0, split.getStart());
-    assertEquals(648, split.getLength());
+    assertEquals(663, split.getLength());
     split = (HiveInputFormat.HiveInputSplit) splits[1];
     assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
         split.inputFormatClassName());
     assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001",
         split.getPath().toString());
     assertEquals(0, split.getStart());
-    assertEquals(674, split.getLength());
+    assertEquals(690, split.getLength());
     CombineHiveInputFormat.CombineHiveInputSplit combineSplit =
         (CombineHiveInputFormat.CombineHiveInputSplit) splits[2];
     assertEquals(BUCKETS, combineSplit.getNumPaths());

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 0353ebf..1d9c9a7 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -306,7 +306,7 @@ public class TestWorker extends CompactorTest {
     for (int i = 0; i < stat.length; i++) {
       if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
@@ -315,7 +315,7 @@ public class TestWorker extends CompactorTest {
       }
       if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
@@ -438,7 +438,7 @@ public class TestWorker extends CompactorTest {
     for (int i = 0; i < stat.length; i++) {
       if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
@@ -446,14 +446,14 @@ public class TestWorker extends CompactorTest {
         Assert.assertEquals(104L, buckets[1].getLen());
       }
       if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(21, 24))) {
-          sawNewDelta = true;
-          FileStatus[] buckets = fs.listStatus(stat[i].getPath());
-          Assert.assertEquals(2, buckets.length);
-          Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
-          Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
-          Assert.assertEquals(104L, buckets[0].getLen());
-          Assert.assertEquals(104L, buckets[1].getLen());
-        } else {
+        sawNewDelta = true;
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
+        Assert.assertEquals(2, buckets.length);
+        Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
+        Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
+        Assert.assertEquals(104L, buckets[0].getLen());
+        Assert.assertEquals(104L, buckets[1].getLen());
+      } else {
         LOG.debug("This is not the delta file you are looking for " + stat[i].getPath().getName());
       }
     }
@@ -490,7 +490,7 @@ public class TestWorker extends CompactorTest {
     for (int i = 0; i < stat.length; i++) {
       if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(1, 4))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
@@ -499,7 +499,7 @@ public class TestWorker extends CompactorTest {
       }
       if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(1, 4))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
@@ -842,7 +842,7 @@ public class TestWorker extends CompactorTest {
     for (int i = 0; i < stat.length; i++) {
       if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
-        FileStatus[] buckets = fs.listStatus(stat[i].getPath());
+        FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 76df2b6..d5070d3 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -42,12 +42,12 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_vectorized
-            Statistics: Num rows: 1 Data size: 24540 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 25030 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 24540 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 25030 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(a)
                 mode: hash
@@ -83,7 +83,7 @@ STAGE PLANS:
               serialization.ddl struct acid_vectorized { i32 a, string b}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
-              totalSize 2454
+              totalSize 2503
               transactional true
               transactional_properties default
 #### A masked pattern was here ####
@@ -106,7 +106,7 @@ STAGE PLANS:
                 serialization.ddl struct acid_vectorized { i32 a, string b}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                totalSize 2454
+                totalSize 2503
                 transactional true
                 transactional_properties default
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/acid_table_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out
index fa6c666..39777a6 100644
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@ -95,7 +95,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   
-	totalSize           	3949                
+	totalSize           	3980                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -133,9 +133,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid
-            Statistics: Num rows: 81 Data size: 39490 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 81 Data size: 39800 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 81 Data size: 39490 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 81 Data size: 39800 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
                 mode: hash
@@ -210,7 +210,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	208000              
-	totalSize           	3949                
+	totalSize           	3980                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -261,7 +261,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	208000              
-	totalSize           	3949                
+	totalSize           	3980                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -386,7 +386,7 @@ Partition Parameters:
 	numFiles            	4                   
 	numRows             	1000                
 	rawDataSize         	208000              
-	totalSize           	7890                
+	totalSize           	7952                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -433,7 +433,7 @@ Partition Parameters:
 	numFiles            	4                   
 	numRows             	2000                
 	rawDataSize         	416000              
-	totalSize           	7890                
+	totalSize           	7952                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index 9c0e020..fea8acb 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -197,7 +197,7 @@ Table Parameters:
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   
-	totalSize           	1798                
+	totalSize           	1834                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -241,7 +241,7 @@ Table Parameters:
 	numFiles            	4                   
 	numRows             	0                   
 	rawDataSize         	0                   
-	totalSize           	2884                
+	totalSize           	2955                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index fba0158..feca9c7 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -103,7 +103,7 @@ STAGE PLANS:
                     serialization.ddl struct acidtbldefault { i32 a}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    totalSize 32616
+                    totalSize 32918
                     transactional true
                     transactional_properties default
 #### A masked pattern was here ####
@@ -127,7 +127,7 @@ STAGE PLANS:
                       serialization.ddl struct acidtbldefault { i32 a}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                      totalSize 32616
+                      totalSize 32918
                       transactional true
                       transactional_properties default
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
index aa2dcc7..cd3023f 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
@@ -252,7 +252,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16673
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -283,7 +283,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -302,7 +302,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1055
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -334,7 +334,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -364,7 +364,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -394,7 +394,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -424,7 +424,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -454,7 +454,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -484,7 +484,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -514,7 +514,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -544,7 +544,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -574,7 +574,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -604,7 +604,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -634,7 +634,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -664,7 +664,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 103
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -694,7 +694,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -713,7 +713,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -732,7 +732,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -759,7 +759,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -786,7 +786,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -805,7 +805,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 5691
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -837,7 +837,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -867,7 +867,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -897,7 +897,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -916,7 +916,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -946,7 +946,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -973,7 +973,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1003,7 +1003,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1033,7 +1033,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1063,7 +1063,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1093,7 +1093,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1123,7 +1123,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1153,7 +1153,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1183,7 +1183,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1213,7 +1213,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1243,7 +1243,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1270,7 +1270,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1297,7 +1297,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1324,7 +1324,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1354,7 +1354,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1384,7 +1384,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1414,7 +1414,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
index eb54a81..e7a1a5b 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
@@ -252,7 +252,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 7
+   HDFS_READ_OPS: 8
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -284,7 +284,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
index 590437c..75a2908 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
@@ -205,7 +205,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16673
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -236,7 +236,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -255,7 +255,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1055
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -287,7 +287,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -317,7 +317,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -347,7 +347,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -377,7 +377,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -407,7 +407,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -437,7 +437,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -467,7 +467,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -497,7 +497,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -527,7 +527,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -557,7 +557,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -587,7 +587,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -617,7 +617,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 103
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -647,7 +647,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -666,7 +666,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -685,7 +685,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -712,7 +712,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -739,7 +739,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -758,7 +758,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 5691
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -790,7 +790,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -820,7 +820,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -850,7 +850,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -869,7 +869,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -899,7 +899,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -926,7 +926,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -956,7 +956,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -986,7 +986,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1016,7 +1016,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1046,7 +1046,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1076,7 +1076,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1106,7 +1106,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1136,7 +1136,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1166,7 +1166,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1196,7 +1196,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1223,7 +1223,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1250,7 +1250,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1277,7 +1277,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1307,7 +1307,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1337,7 +1337,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1367,7 +1367,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1397,7 +1397,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4912
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1429,7 +1429,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1751
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1461,7 +1461,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1491,7 +1491,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1522,7 +1522,7 @@ PREHOOK: Output: default@orc_ppd_1
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 10129
    HDFS_BYTES_WRITTEN: 1415
-   HDFS_READ_OPS: 5
+   HDFS_READ_OPS: 6
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 3
 Stage-1 HIVE COUNTERS:
@@ -1552,7 +1552,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1539
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1584,7 +1584,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1614,7 +1614,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1644,7 +1644,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
index c7c8993..d9572e7 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
@@ -205,7 +205,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17008
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -237,7 +237,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -256,7 +256,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -286,7 +286,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 720
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -318,7 +318,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -348,7 +348,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -382,7 +382,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -412,7 +412,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -431,7 +431,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -461,7 +461,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -491,7 +491,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -521,7 +521,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -555,7 +555,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -585,7 +585,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -604,7 +604,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -634,7 +634,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -664,7 +664,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -694,7 +694,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -728,7 +728,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -758,7 +758,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -777,7 +777,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -807,7 +807,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -837,7 +837,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -867,7 +867,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -901,7 +901,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -925,7 +925,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -949,7 +949,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -973,7 +973,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -997,7 +997,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1021,7 +1021,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1045,7 +1045,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4912
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1077,7 +1077,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1751
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1113,7 +1113,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 21458
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1137,7 +1137,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 23336
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1165,7 +1165,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 21458
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1189,7 +1189,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 23336
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1213,7 +1213,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4099
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1245,7 +1245,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1592
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1281,7 +1281,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20629
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1305,7 +1305,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 22364
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1333,7 +1333,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1363,7 +1363,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1397,7 +1397,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20629
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1421,7 +1421,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 22364
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1449,7 +1449,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1479,7 +1479,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1513,7 +1513,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 2183
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1545,7 +1545,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 18747
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1569,7 +1569,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1217
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1601,7 +1601,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20073
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:

http://git-wip-us.apache.org/repos/asf/hive/blob/388945f1/ql/src/test/results/clientpositive/row__id.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/row__id.q.out b/ql/src/test/results/clientpositive/row__id.q.out
index 6659327..bf10c6e 100644
--- a/ql/src/test/results/clientpositive/row__id.q.out
+++ b/ql/src/test/results/clientpositive/row__id.q.out
@@ -62,23 +62,23 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hello_acid
-            Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ROW__ID.transactionid (type: bigint)
               outputColumnNames: _col0
-              Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: bigint)
                 sort order: +
-                Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: bigint)
           outputColumnNames: _col0
-          Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -123,17 +123,17 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hello_acid
-            Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 75 Data size: 18890 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (ROW__ID.transactionid = 3) (type: boolean)
-              Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 37 Data size: 9319 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ROW__ID.transactionid (type: bigint)
                 outputColumnNames: _col0
-                Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 37 Data size: 9319 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 37 Data size: 9319 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat