Posted to commits@hive.apache.org by pr...@apache.org on 2018/01/23 18:52:59 UTC

[3/3] hive git commit: HIVE-17833: Publish split generation counters (Prasanth Jayachandran reviewed by Sergey Shelukhin)

HIVE-17833: Publish split generation counters (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fff86f3a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fff86f3a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fff86f3a

Branch: refs/heads/master
Commit: fff86f3a6d03fdabfba3c1dc8d3a04c48be93aa8
Parents: 525f8ab
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Tue Jan 23 10:52:41 2018 -0800
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Tue Jan 23 10:52:41 2018 -0800

----------------------------------------------------------------------
 .../hive/jdbc/TestTriggersNoTezSessionPool.java |   24 +-
 .../jdbc/TestTriggersTezSessionPoolManager.java |   60 +-
 .../test/resources/testconfiguration.properties |    1 +
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |   12 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |   14 +
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |    3 +-
 .../hive/ql/exec/tez/HiveInputCounters.java     |   29 +
 .../hive/ql/exec/tez/HiveSplitGenerator.java    |   61 +-
 .../ql/exec/tez/monitoring/TezJobMonitor.java   |  108 +-
 .../ql/hooks/PostExecTezSummaryPrinter.java     |    7 +
 .../hadoop/hive/ql/wm/VertexCounterLimit.java   |   11 +-
 .../apache/hadoop/hive/ql/wm/TestTrigger.java   |   24 +-
 .../queries/clientpositive/tez_input_counters.q |   25 +
 .../clientpositive/llap/dp_counter_mm.q.out     |   48 +
 .../clientpositive/llap/dp_counter_non_mm.q.out |   48 +
 .../clientpositive/llap/orc_llap_counters.q.out |  210 ++
 .../llap/orc_llap_counters1.q.out               |   10 +
 .../clientpositive/llap/orc_ppd_basic.q.out     |  230 ++
 .../llap/orc_ppd_schema_evol_3a.q.out           |  250 ++
 .../llap/tez_input_counters.q.out               | 2303 ++++++++++++++++++
 20 files changed, 3439 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersNoTezSessionPool.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersNoTezSessionPool.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersNoTezSessionPool.java
index e9f6718..2117b68 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersNoTezSessionPool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersNoTezSessionPool.java
@@ -44,8 +44,28 @@ public class TestTriggersNoTezSessionPool extends AbstractJdbcTriggersTest {
   }
 
   @Test(timeout = 60000)
-  public void testTriggerTotalTasks() throws Exception {
-    Expression expression = ExpressionFactory.fromString("TOTAL_TASKS > 50");
+  public void testTriggerVertexTotalTasks() throws Exception {
+    Expression expression = ExpressionFactory.fromString("VERTEX_TOTAL_TASKS > 50");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select sleep(t1.under_col, 5), t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), trigger + " violated");
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerDAGTotalTasks() throws Exception {
+    Expression expression = ExpressionFactory.fromString("DAG_TOTAL_TASKS > 50");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select sleep(t1.under_col, 5), t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), trigger + " violated");
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerTotalLaunchedTasks() throws Exception {
+    Expression expression = ExpressionFactory.fromString("TOTAL_LAUNCHED_TASKS > 50");
     Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
     setupTriggers(Lists.newArrayList(trigger));
     String query = "select sleep(t1.under_col, 5), t1.value from " + tableName + " t1 join " + tableName +

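The tests above exercise the new counter names end to end: an Expression is parsed from a string, wrapped in an ExecutionTrigger carrying a KILL_QUERY action, and validated against counter values while the query runs. A minimal sketch of that flow, assuming the trigger classes live in org.apache.hadoop.hive.ql.wm as in TestTrigger further below:

import org.apache.hadoop.hive.ql.wm.Action;
import org.apache.hadoop.hive.ql.wm.ExecutionTrigger;
import org.apache.hadoop.hive.ql.wm.Expression;
import org.apache.hadoop.hive.ql.wm.ExpressionFactory;
import org.apache.hadoop.hive.ql.wm.Trigger;

public class TriggerSketch {
  public static void main(String[] args) {
    // DAG_RAW_INPUT_SPLITS aggregates RAW_INPUT_SPLITS across all vertices of the DAG.
    Expression expression = ExpressionFactory.fromString("DAG_RAW_INPUT_SPLITS > 100");
    Trigger trigger = new ExecutionTrigger("highly_parallel", expression,
        new Action(Action.Type.KILL_QUERY));
    // apply() returns true once the observed counter value violates the limit.
    System.out.println(trigger.apply(55));   // false: 55 <= 100
    System.out.println(trigger.apply(110));  // true: 110 > 100
  }
}
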
http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersTezSessionPoolManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersTezSessionPoolManager.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersTezSessionPoolManager.java
index 3b6eb71..b2bb443 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersTezSessionPoolManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersTezSessionPoolManager.java
@@ -91,7 +91,17 @@ public class TestTriggersTezSessionPoolManager extends AbstractJdbcTriggersTest
 
   @Test(timeout = 60000)
   public void testTriggerTotalTasks() throws Exception {
-    Expression expression = ExpressionFactory.fromString("TOTAL_TASKS > 50");
+    Expression expression = ExpressionFactory.fromString("VERTEX_TOTAL_TASKS > 50");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select sleep(t1.under_col, 5), t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), trigger + " violated");
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerDagTotalTasks() throws Exception {
+    Expression expression = ExpressionFactory.fromString("DAG_TOTAL_TASKS > 50");
     Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
     setupTriggers(Lists.newArrayList(trigger));
     String query = "select sleep(t1.under_col, 5), t1.value from " + tableName + " t1 join " + tableName +
@@ -219,6 +229,54 @@ public class TestTriggersTezSessionPoolManager extends AbstractJdbcTriggersTest
   }
 
   @Test(timeout = 60000)
+  public void testTriggerDagRawInputSplitsKill() throws Exception {
+    // Map 1 - 55 splits
+    // Map 3 - 55 splits
+    Expression expression = ExpressionFactory.fromString("DAG_RAW_INPUT_SPLITS > 100");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select t1.under_col, t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), "Query was cancelled");
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerVertexRawInputSplitsNoKill() throws Exception {
+    // Map 1 - 55 splits
+    // Map 3 - 55 splits
+    Expression expression = ExpressionFactory.fromString("VERTEX_RAW_INPUT_SPLITS > 100");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select t1.under_col, t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), null);
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerVertexRawInputSplitsKill() throws Exception {
+    // Map 1 - 55 splits
+    // Map 3 - 55 splits
+    Expression expression = ExpressionFactory.fromString("VERTEX_RAW_INPUT_SPLITS > 50");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select t1.under_col, t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), "Query was cancelled");
+  }
+
+  @Test(timeout = 60000)
+  public void testTriggerDefaultRawInputSplits() throws Exception {
+    // Map 1 - 55 splits
+    // Map 3 - 55 splits
+    Expression expression = ExpressionFactory.fromString("RAW_INPUT_SPLITS > 50");
+    Trigger trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    setupTriggers(Lists.newArrayList(trigger));
+    String query = "select t1.under_col, t1.value from " + tableName + " t1 join " + tableName +
+      " t2 on t1.under_col>=t2.under_col";
+    runQueryWithTrigger(query, getConfigs(), "Query was cancelled");
+  }
+
+  @Test(timeout = 60000)
   public void testMultipleTriggers1() throws Exception {
     Expression shuffleExpression = ExpressionFactory.fromString("HDFS_BYTES_READ > 1000000");
     Trigger shuffleTrigger = new ExecutionTrigger("big_shuffle", shuffleExpression, new Action(Action.Type.KILL_QUERY));

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 53da72b..1017249 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -586,6 +586,7 @@ minillaplocal.query.files=\
   orc_analyze.q,\
   orc_llap_nonvector.q,\
   orc_ppd_date.q,\
+  tez_input_counters.q,\
   orc_ppd_decimal.q,\
   orc_ppd_timestamp.q,\
   order_null.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 0e63031..395a5f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -163,7 +163,8 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
       cntr = 1;
       logEveryNRows = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVE_LOG_N_RECORDS);
 
-      statsMap.put(getCounterName(Counter.RECORDS_OUT_INTERMEDIATE, hconf), recordCounter);
+      final String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, "");
+      statsMap.put(Utilities.getVertexCounterName(Counter.RECORDS_OUT_INTERMEDIATE.name(), vertexName), recordCounter);
 
       List<ExprNodeDesc> keys = conf.getKeyCols();
 
@@ -248,15 +249,6 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
     }
   }
 
-  public String getCounterName(Counter counter, Configuration hconf) {
-    String context = hconf.get(Operator.CONTEXT_NAME_KEY, "");
-    if (context != null && !context.isEmpty()) {
-      context = "_" + context.replace(" ", "_");
-    }
-    return counter + context;
-  }
-
-
   /**
    * Initializes array of ExprNodeEvaluator. Adds Union field for distinct
    * column indices for group by.

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4da9d57..2e1fd37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3246,6 +3246,20 @@ public final class Utilities {
   }
 
   /**
+   * Appends the vertex name to the specified counter name.
+   *
+   * @param counter counter name to which the vertex name is appended
+   * @param vertexName vertex name (spaces are replaced with underscores)
+   * @return counter name with the vertex name appended
+   */
+  public static String getVertexCounterName(String counter, String vertexName) {
+    if (vertexName != null && !vertexName.isEmpty()) {
+      vertexName = "_" + vertexName.replace(" ", "_");
+    }
+    return counter + vertexName;
+  }
+
+  /**
    * Computes a list of all input paths needed to compute the given MapWork. All aliases
    * are considered and a merged list of input paths is returned. If any input path points
    * to an empty table or partition a dummy file in the scratch dir is instead created and

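The new helper generalizes what ReduceSinkOperator.getCounterName used to do for a single operator: it suffixes the vertex name, with spaces replaced by underscores, onto any counter name. A small worked example of the naming scheme, with the logic inlined for illustration (callers in this commit pass the Configuration default of "", so vertexName is never null here; a null argument to the method as written would concatenate the literal string "null"):

public class VertexCounterNameSketch {
  // Same logic as Utilities.getVertexCounterName, inlined for illustration.
  static String getVertexCounterName(String counter, String vertexName) {
    if (vertexName != null && !vertexName.isEmpty()) {
      vertexName = "_" + vertexName.replace(" ", "_");
    }
    return counter + vertexName;
  }

  public static void main(String[] args) {
    // Vertex names such as "Map 1" become suffixes with spaces replaced by '_'.
    System.out.println(getVertexCounterName("RAW_INPUT_SPLITS", "Map 1"));
    // -> RAW_INPUT_SPLITS_Map_1
    // An empty vertex name leaves the counter name unchanged.
    System.out.println(getVertexCounterName("RECORDS_OUT_INTERMEDIATE", ""));
    // -> RECORDS_OUT_INTERMEDIATE
  }
}
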
http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 324ff17..c6e17b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -374,8 +374,9 @@ public class SparkTask extends Task<SparkWork> {
             hiveCounters.add(((FileSinkOperator) operator).getCounterName(counter));
           }
         } else if (operator instanceof ReduceSinkOperator) {
+          final String contextName = conf.get(Operator.CONTEXT_NAME_KEY, "");
           for (ReduceSinkOperator.Counter counter : ReduceSinkOperator.Counter.values()) {
-            hiveCounters.add(((ReduceSinkOperator) operator).getCounterName(counter, conf));
+            hiveCounters.add(Utilities.getVertexCounterName(counter.name(), contextName));
           }
         } else if (operator instanceof ScriptOperator) {
           for (ScriptOperator.Counter counter : ScriptOperator.Counter.values()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveInputCounters.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveInputCounters.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveInputCounters.java
new file mode 100644
index 0000000..085d6a7
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveInputCounters.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.tez;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public enum HiveInputCounters {
+  RAW_INPUT_SPLITS,
+  GROUPED_INPUT_SPLITS,
+  INPUT_FILES,
+  INPUT_DIRECTORIES
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
index 7479641..98f4bc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
@@ -23,10 +23,16 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import com.google.common.base.Preconditions;
 
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.tez.common.counters.TezCounters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -158,7 +164,7 @@ public class HiveSplitGenerator extends InputInitializer {
         // Need to instantiate the realInputFormat
         InputFormat<?, ?> inputFormat =
           (InputFormat<?, ?>) ReflectionUtils.newInstance(JavaUtils.loadClass(realInputFormatName),
-              jobConf);
+            jobConf);
 
         int totalResource = 0;
         int taskResource = 0;
@@ -178,10 +184,10 @@ public class HiveSplitGenerator extends InputInitializer {
         if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1) <= 1) {
           // broken configuration from mapred-default.xml
           final long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-              DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+            DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
           final long minGrouping = conf.getLong(
-              TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE,
-              TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT);
+            TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE,
+            TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT);
           final long preferredSplitSize = Math.min(blockSize / 2, minGrouping);
           HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize);
           LOG.info("The preferred split size is " + preferredSplitSize);
@@ -189,15 +195,47 @@ public class HiveSplitGenerator extends InputInitializer {
 
         // Create the un-grouped splits
         float waves =
-            conf.getFloat(TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES,
-                TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES_DEFAULT);
+          conf.getFloat(TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES,
+            TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES_DEFAULT);
 
         // Raw splits
         InputSplit[] splits = inputFormat.getSplits(jobConf, (int) (availableSlots * waves));
         // Sort the splits, so that subsequent grouping is consistent.
         Arrays.sort(splits, new InputSplitComparator());
         LOG.info("Number of input splits: " + splits.length + ". " + availableSlots
-            + " available slots, " + waves + " waves. Input format is: " + realInputFormatName);
+          + " available slots, " + waves + " waves. Input format is: " + realInputFormatName);
+
+        // increment/set input counters
+        InputInitializerContext inputInitializerContext = getContext();
+        TezCounters tezCounters = null;
+        String counterName;
+        String groupName = null;
+        String vertexName = null;
+        if (inputInitializerContext != null) {
+          tezCounters = new TezCounters();
+          groupName = HiveInputCounters.class.getName();
+          vertexName = jobConf.get(Operator.CONTEXT_NAME_KEY, "");
+          counterName = Utilities.getVertexCounterName(HiveInputCounters.RAW_INPUT_SPLITS.name(), vertexName);
+          tezCounters.findCounter(groupName, counterName).increment(splits.length);
+          final List<Path> paths = Utilities.getInputPathsTez(jobConf, work);
+          counterName = Utilities.getVertexCounterName(HiveInputCounters.INPUT_DIRECTORIES.name(), vertexName);
+          tezCounters.findCounter(groupName, counterName).increment(paths.size());
+          final Set<String> files = new HashSet<>();
+          for (InputSplit inputSplit : splits) {
+            if (inputSplit instanceof FileSplit) {
+              final FileSplit fileSplit = (FileSplit) inputSplit;
+              final Path path = fileSplit.getPath();
+              // The assumption here is that the path is a file. The only case where this differs is ACID deltas.
+              // The isFile check is avoided here for performance reasons.
+              final String fileStr = path.toString();
+              if (!files.contains(fileStr)) {
+                files.add(fileStr);
+              }
+            }
+          }
+          counterName = Utilities.getVertexCounterName(HiveInputCounters.INPUT_FILES.name(), vertexName);
+          tezCounters.findCounter(groupName, counterName).increment(files.size());
+        }
 
         if (work.getIncludedBuckets() != null) {
           splits = pruneBuckets(work, splits);
@@ -208,6 +246,15 @@ public class HiveSplitGenerator extends InputInitializer {
         // And finally return them in a flat array
         InputSplit[] flatSplits = groupedSplits.values().toArray(new InputSplit[0]);
         LOG.info("Number of split groups: " + flatSplits.length);
+        if (inputInitializerContext != null) {
+          counterName = Utilities.getVertexCounterName(HiveInputCounters.GROUPED_INPUT_SPLITS.name(), vertexName);
+          tezCounters.findCounter(groupName, counterName).setValue(flatSplits.length);
+
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Published tez counters: " + tezCounters);
+          }
+          inputInitializerContext.addCounters(tezCounters);
+        }
 
         List<TaskLocationHint> locationHints = splitGrouper.createTaskLocationHints(flatSplits, generateConsistentSplits);
 

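During split generation the initializer now builds a TezCounters group named after the HiveInputCounters enum class, suffixes each counter with the vertex name, and publishes the whole group through the InputInitializerContext. A condensed sketch of that publishing path, using the same variable names as the hunk above and assuming getContext() is non-null:

// Inside HiveSplitGenerator.initialize(), after the raw splits are computed:
TezCounters tezCounters = new TezCounters();
String groupName = HiveInputCounters.class.getName();
String vertexName = jobConf.get(Operator.CONTEXT_NAME_KEY, "");  // e.g. "Map 1"

// RAW_INPUT_SPLITS_Map_1 <- number of ungrouped splits
tezCounters.findCounter(groupName,
    Utilities.getVertexCounterName(HiveInputCounters.RAW_INPUT_SPLITS.name(), vertexName))
  .increment(splits.length);

// INPUT_DIRECTORIES and INPUT_FILES are incremented the same way; after grouping,
// GROUPED_INPUT_SPLITS is set (not incremented) to the final split-group count:
tezCounters.findCounter(groupName,
    Utilities.getVertexCounterName(HiveInputCounters.GROUPED_INPUT_SPLITS.name(), vertexName))
  .setValue(flatSplits.length);

// Hand the whole counter group to Tez so it surfaces in the DAG counters.
getContext().addCounters(tezCounters);
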
http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
index 55e7d7d..3558475 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
@@ -30,6 +30,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
@@ -44,8 +45,8 @@ import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.ql.wm.TimeCounterLimit;
-import org.apache.hadoop.hive.ql.wm.WmContext;
 import org.apache.hadoop.hive.ql.wm.VertexCounterLimit;
+import org.apache.hadoop.hive.ql.wm.WmContext;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.apache.tez.common.counters.CounterGroup;
 import org.apache.tez.common.counters.TezCounter;
@@ -169,10 +170,15 @@ public class TezJobMonitor {
         TezCounters dagCounters = status.getDAGCounters();
         vertexProgressMap = status.getVertexProgress();
         wmContext = context.getWmContext();
+        List<String> vertexNames = vertexProgressMap.keySet()
+          .stream()
+          .map(k -> k.replaceAll(" ", "_"))
+          .collect(Collectors.toList());
         if (dagCounters != null && wmContext != null) {
           Set<String> desiredCounters = wmContext.getSubscribedCounters();
           if (desiredCounters != null && !desiredCounters.isEmpty()) {
-            Map<String, Long> currentCounters = getCounterValues(dagCounters, vertexProgressMap, desiredCounters, done);
+            Map<String, Long> currentCounters = getCounterValues(dagCounters, vertexNames, vertexProgressMap,
+              desiredCounters, done);
             wmContext.setCurrentCounters(currentCounters);
           }
         }
@@ -292,32 +298,62 @@ public class TezJobMonitor {
   }
 
   private Map<String, Long> getCounterValues(final TezCounters dagCounters,
-    final Map<String, Progress> vertexProgressMap,
+    final List<String> vertexNames, final Map<String, Progress> vertexProgressMap,
     final Set<String> desiredCounters, final boolean done) {
     // DAG specific counters
     Map<String, Long> updatedCounters = new HashMap<>();
     for (CounterGroup counterGroup : dagCounters) {
       for (TezCounter tezCounter : counterGroup) {
         String counterName = tezCounter.getName();
-        if (desiredCounters.contains(counterName)) {
-          updatedCounters.put(counterName, tezCounter.getValue());
+        for (String desiredCounter : desiredCounters) {
+          if (counterName.equals(desiredCounter)) {
+            updatedCounters.put(counterName, tezCounter.getValue());
+          } else if (isDagLevelCounter(desiredCounter)) {
+            // By default, we aggregate counters across the entire DAG. Example: SHUFFLE_BYTES would mean the
+            // SHUFFLE_BYTES of each vertex aggregated together to create a DAG-level SHUFFLE_BYTES.
+            // Use case: if SHUFFLE_BYTES across the entire DAG is > limit, perform the action.
+            String prefixRemovedCounterName = getCounterFromDagCounter(desiredCounter);
+            aggregateCountersSum(updatedCounters, vertexNames, prefixRemovedCounterName, desiredCounter, tezCounter);
+          } else if (isVertexLevelCounter(desiredCounter)) {
+            // if the counter name starts with VERTEX_ then we just return the max value across all vertices, since
+            // trigger validation is only interested in violations that exceed the limit (*any* vertex violation).
+            // Use case: If SHUFFLE_BYTES for any single vertex is > limit perform action
+            String prefixRemovedCounterName = getCounterFromVertexCounter(desiredCounter);
+            aggregateCountersMax(updatedCounters, vertexNames, prefixRemovedCounterName, desiredCounter, tezCounter);
+          } else if (counterName.startsWith(desiredCounter)) {
+            // Counters with vertex name as suffix
+            // desiredCounter = INPUT_FILES
+            // counters: {INPUT_FILES_Map_1 : 5, INPUT_FILES_Map_4 : 10}
+            // outcome: INPUT_FILES : 15
+            String prefixRemovedCounterName = desiredCounter;
+            aggregateCountersSum(updatedCounters, vertexNames, prefixRemovedCounterName, desiredCounter, tezCounter);
+          }
         }
       }
     }
 
-    // Process per vertex counters.
-    String counterName = VertexCounterLimit.VertexCounter.TOTAL_TASKS.name();
+    // Process per vertex counters that are available only via vertex Progress
+    String counterName = VertexCounterLimit.VertexCounter.VERTEX_TOTAL_TASKS.name();
     if (desiredCounters.contains(counterName) && vertexProgressMap != null) {
       for (Map.Entry<String, Progress> entry : vertexProgressMap.entrySet()) {
-        // TOTAL_TASKS counter is per vertex counter, but triggers are validated at query level
-        // looking for query level violations. So we always choose max TOTAL_TASKS among all vertices.
-        // Publishing TOTAL_TASKS for all vertices is not really useful from the context of triggers.
         long currentMax = 0;
         if (updatedCounters.containsKey(counterName)) {
           currentMax = updatedCounters.get(counterName);
         }
-        long totalTasks = Math.max(currentMax, entry.getValue().getTotalTaskCount());
-        updatedCounters.put(counterName, totalTasks);
+        long newMax = Math.max(currentMax, entry.getValue().getTotalTaskCount());
+        updatedCounters.put(counterName, newMax);
+      }
+    }
+
+    counterName = VertexCounterLimit.VertexCounter.DAG_TOTAL_TASKS.name();
+    if (desiredCounters.contains(counterName) && vertexProgressMap != null) {
+      for (Map.Entry<String, Progress> entry : vertexProgressMap.entrySet()) {
+        long currentTotal = 0;
+        if (updatedCounters.containsKey(counterName)) {
+          currentTotal = updatedCounters.get(counterName);
+        }
+        long newTotal = currentTotal + entry.getValue().getTotalTaskCount();
+        updatedCounters.put(counterName, newTotal);
       }
     }
 
@@ -337,6 +373,54 @@ public class TezJobMonitor {
     return updatedCounters;
   }
 
+  private void aggregateCountersSum(final Map<String, Long> updatedCounters, final List<String> vertexNames,
+    final String prefixRemovedCounterName, final String desiredCounter, final TezCounter tezCounter) {
+    long counterValue = checkVertexSuffixAndGetValue(vertexNames, prefixRemovedCounterName, tezCounter);
+    long currentTotal = 0;
+    if (updatedCounters.containsKey(desiredCounter)) {
+      currentTotal = updatedCounters.get(desiredCounter);
+    }
+    long newTotal = currentTotal + counterValue;
+    updatedCounters.put(desiredCounter, newTotal);
+  }
+
+  private void aggregateCountersMax(final Map<String, Long> updatedCounters, final List<String> vertexNames,
+    final String prefixRemovedCounterName, final String desiredCounter, final TezCounter tezCounter) {
+    long counterValue = checkVertexSuffixAndGetValue(vertexNames, prefixRemovedCounterName, tezCounter);
+    long currentMax = 0;
+    if (updatedCounters.containsKey(desiredCounter)) {
+      currentMax = updatedCounters.get(desiredCounter);
+    }
+    long newMax = Math.max(currentMax, counterValue);
+    updatedCounters.put(desiredCounter, newMax);
+  }
+
+  private long checkVertexSuffixAndGetValue(final List<String> vertexNames, final String counterName,
+    final TezCounter tezCounter) {
+    for (String vertexName : vertexNames) {
+      if (tezCounter.getName().equalsIgnoreCase(counterName + "_" + vertexName)) {
+        return tezCounter.getValue();
+      }
+    }
+    return 0;
+  }
+
+  private String getCounterFromDagCounter(final String desiredCounter) {
+    return desiredCounter.substring("DAG_".length());
+  }
+
+  private String getCounterFromVertexCounter(final String desiredCounter) {
+    return desiredCounter.substring("VERTEX_".length());
+  }
+
+  private boolean isVertexLevelCounter(final String desiredCounter) {
+    return desiredCounter.startsWith("VERTEX_");
+  }
+
+  private boolean isDagLevelCounter(final String desiredCounter) {
+    return desiredCounter.startsWith("DAG_");
+  }
+
   private void printSummary(boolean success, Map<String, Progress> progressMap) {
     if (isProfilingEnabled() && success && progressMap != null) {
 

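The monitor now resolves each subscribed counter name against the vertex-suffixed DAG counters using three rules: an exact match is taken as-is; a DAG_ prefix strips the prefix and sums the matching per-vertex values (aggregateCountersSum); a VERTEX_ prefix strips the prefix and takes the maximum per-vertex value (aggregateCountersMax), since any single vertex exceeding the limit should fire the trigger; and a bare counter name that prefixes vertex-suffixed counters is summed by default. A self-contained sketch of those rules over a hypothetical counter map matching the 55-splits-per-map tests above (the resolution is simplified relative to getCounterValues):

import java.util.HashMap;
import java.util.Map;

public class CounterAggregationSketch {
  public static void main(String[] args) {
    // Hypothetical per-vertex counters, named as the split generator publishes them.
    Map<String, Long> perVertex = new HashMap<>();
    perVertex.put("RAW_INPUT_SPLITS_Map_1", 55L);
    perVertex.put("RAW_INPUT_SPLITS_Map_3", 55L);

    System.out.println(resolve("DAG_RAW_INPUT_SPLITS", perVertex));    // 110: sum, violates "> 100"
    System.out.println(resolve("VERTEX_RAW_INPUT_SPLITS", perVertex)); // 55: max, violates "> 50" only
    System.out.println(resolve("RAW_INPUT_SPLITS", perVertex));        // 110: bare names sum by default
  }

  // Mirrors the sum/max resolution in TezJobMonitor.getCounterValues, simplified.
  static long resolve(String desired, Map<String, Long> perVertex) {
    boolean useMax = desired.startsWith("VERTEX_");
    String base = desired.startsWith("DAG_") ? desired.substring("DAG_".length())
        : useMax ? desired.substring("VERTEX_".length()) : desired;
    long result = 0;
    for (Map.Entry<String, Long> e : perVertex.entrySet()) {
      if (e.getKey().startsWith(base + "_")) { // vertex-suffixed counter, e.g. _Map_1
        result = useMax ? Math.max(result, e.getValue()) : result + e.getValue();
      }
    }
    return result;
  }
}
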
http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
index 012adaa..14ebfa0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.ql.hooks;
 import java.util.List;
 
 import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
+import org.apache.hadoop.hive.ql.exec.tez.HiveInputCounters;
 import org.apache.tez.common.counters.FileSystemCounter;
+import org.apache.tez.dag.api.client.DAGClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -66,6 +68,11 @@ public class PostExecTezSummaryPrinter implements ExecuteWithHookContext {
             for (TezCounter counter : group) {
               console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
             }
+          } else if (group.getName().equals(HiveInputCounters.class.getName())) {
+            console.printInfo(tezTask.getId() + " INPUT COUNTERS:", false);
+            for (TezCounter counter : group) {
+              console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
+            }
           } else if (group.getName().equals(FileSystemCounter.class.getName())) {
             console.printInfo(tezTask.getId() + " FILE SYSTEM COUNTERS:", false);
             for (TezCounter counter : group) {

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/java/org/apache/hadoop/hive/ql/wm/VertexCounterLimit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/wm/VertexCounterLimit.java b/ql/src/java/org/apache/hadoop/hive/ql/wm/VertexCounterLimit.java
index 7d6482a..5c71aa3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/wm/VertexCounterLimit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/wm/VertexCounterLimit.java
@@ -20,7 +20,16 @@ package org.apache.hadoop.hive.ql.wm;
  */
 public class VertexCounterLimit implements CounterLimit {
   public enum VertexCounter {
-    TOTAL_TASKS
+    DAG_TOTAL_TASKS,
+    DAG_GROUPED_INPUT_SPLITS,
+    DAG_INPUT_DIRECTORIES,
+    DAG_INPUT_FILES,
+    DAG_RAW_INPUT_SPLITS,
+    VERTEX_TOTAL_TASKS,
+    VERTEX_GROUPED_INPUT_SPLITS,
+    VERTEX_INPUT_DIRECTORIES,
+    VERTEX_INPUT_FILES,
+    VERTEX_RAW_INPUT_SPLITS
   }
 
   private VertexCounter vertexCounter;

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
index a3e8336..c5748f5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
@@ -72,9 +72,16 @@ public class TestTrigger {
     assertTrue(trigger.apply(100000));
 
     expression = ExpressionFactory.createExpression(new VertexCounterLimit(VertexCounterLimit.VertexCounter
-      .TOTAL_TASKS,10000));
+      .VERTEX_TOTAL_TASKS, 10000));
     trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
-    assertEquals("counter: TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
+    assertEquals("counter: VERTEX_TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
+    assertFalse(trigger.apply(1000));
+    assertTrue(trigger.apply(100000));
+
+    expression = ExpressionFactory.createExpression(new VertexCounterLimit(VertexCounterLimit.VertexCounter
+      .DAG_TOTAL_TASKS, 10000));
+    trigger = new ExecutionTrigger("highly_parallel", expression, new Action(Action.Type.KILL_QUERY));
+    assertEquals("counter: DAG_TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
     assertFalse(trigger.apply(1000));
     assertTrue(trigger.apply(100000));
 
@@ -163,10 +170,17 @@ public class TestTrigger {
     assertEquals(expected, expression);
     assertEquals(expected.hashCode(), expression.hashCode());
 
-    expression = ExpressionFactory.fromString(" TOTAL_TASKS > 10000");
+    expression = ExpressionFactory.fromString(" VERTEX_TOTAL_TASKS > 10000");
+    expected = ExpressionFactory.createExpression(new VertexCounterLimit(VertexCounterLimit.VertexCounter
+      .VERTEX_TOTAL_TASKS, 10000));
+    assertEquals("counter: VERTEX_TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
+    assertEquals(expected, expression);
+    assertEquals(expected.hashCode(), expression.hashCode());
+
+    expression = ExpressionFactory.fromString(" DAG_TOTAL_TASKS > 10000");
     expected = ExpressionFactory.createExpression(new VertexCounterLimit(VertexCounterLimit.VertexCounter
-      .TOTAL_TASKS,10000));
-    assertEquals("counter: TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
+      .DAG_TOTAL_TASKS, 10000));
+    assertEquals("counter: DAG_TOTAL_TASKS limit: 10000", expression.getCounterLimit().toString());
     assertEquals(expected, expression);
     assertEquals(expected.hashCode(), expression.hashCode());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/queries/clientpositive/tez_input_counters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_input_counters.q b/ql/src/test/queries/clientpositive/tez_input_counters.q
new file mode 100644
index 0000000..d471c3d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/tez_input_counters.q
@@ -0,0 +1,25 @@
+set hive.compute.query.using.stats=false;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.max.dynamic.partitions=400;
+set hive.exec.max.dynamic.partitions.pernode=400;
+set hive.mapred.mode=nonstrict;
+set hive.fetch.task.conversion=none;
+set hive.map.aggr=false;
+-- disabling map side aggregation as that can lead to different intermediate record counts
+set hive.tez.exec.print.summary=true;
+
+create table testpart (k int) partitioned by (v string);
+insert overwrite table testpart partition(v) select * from src;
+insert into table testpart partition(v) select * from src;
+
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
+select sum(hash(*)) from testpart;
+select sum(hash(*)) from testpart where v < 'val_100';
+select sum(hash(*)) from testpart where v < 'val_200';
+
+set hive.tez.dynamic.partition.pruning=true;
+create table testpart1 like testpart;
+insert overwrite table testpart1 partition(v) select * from testpart where v < 'val_200';
+
+explain select sum(hash(*)) from testpart t1 join testpart1 t2 on t1.v = t2.v;
+select sum(hash(*)) from testpart t1 join testpart1 t2 on t1.v = t2.v;

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out b/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
index 662c02a..981f260 100644
--- a/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
+++ b/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
@@ -21,6 +21,11 @@ Stage-1 HIVE COUNTERS:
    DESERIALIZE_ERRORS: 0
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 84
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: insert into table src2 partition (value) select * from src where key < 200
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -32,6 +37,11 @@ Stage-1 HIVE COUNTERS:
    DESERIALIZE_ERRORS: 0
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 189
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -51,6 +61,11 @@ Stage-1 HIVE COUNTERS:
    DESERIALIZE_ERRORS: 0
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 189
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: insert into table src2 partition (value) select * from src where key < 300
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -62,6 +77,11 @@ Stage-1 HIVE COUNTERS:
    DESERIALIZE_ERRORS: 0
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 292
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -91,6 +111,11 @@ Stage-2 HIVE COUNTERS:
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 84
    RECORDS_OUT_2_default.src3: 105
+Stage-2 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: from src
 insert into table src2 partition (value) select * where key < 100
 insert into table src3 partition (value) select * where key >= 100 and key < 300
@@ -106,6 +131,11 @@ Stage-2 HIVE COUNTERS:
    RECORDS_IN_Map_1: 500
    RECORDS_OUT_1_default.src2: 84
    RECORDS_OUT_2_default.src3: 208
+Stage-2 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -130,6 +160,15 @@ Stage-1 HIVE COUNTERS:
    RECORDS_IN_Map_1: 500
    RECORDS_IN_Map_3: 500
    RECORDS_OUT_1_default.src2: 189
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   GROUPED_INPUT_SPLITS_Map_3: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_DIRECTORIES_Map_3: 1
+   INPUT_FILES_Map_1: 1
+   INPUT_FILES_Map_3: 1
+   RAW_INPUT_SPLITS_Map_1: 1
+   RAW_INPUT_SPLITS_Map_3: 1
 PREHOOK: query: insert into table src2 partition (value)
 select temps.* from (
   select * from src where key < 100
@@ -146,3 +185,12 @@ Stage-1 HIVE COUNTERS:
    RECORDS_IN_Map_1: 500
    RECORDS_IN_Map_3: 500
    RECORDS_OUT_1_default.src2: 292
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   GROUPED_INPUT_SPLITS_Map_3: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_DIRECTORIES_Map_3: 1
+   INPUT_FILES_Map_1: 1
+   INPUT_FILES_Map_3: 1
+   RAW_INPUT_SPLITS_Map_1: 1
+   RAW_INPUT_SPLITS_Map_3: 1

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out b/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
index e2ff9a6..9f6426c 100644
--- a/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
+++ b/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
@@ -23,6 +23,11 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_0: 57
    RECORDS_OUT_1_default.src2: 84
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: insert into table src2 partition (value) select * from src where key < 200
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -36,6 +41,11 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_0: 121
    RECORDS_OUT_1_default.src2: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -57,6 +67,11 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_0: 121
    RECORDS_OUT_1_default.src2: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: insert into table src2 partition (value) select * from src where key < 300
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -70,6 +85,11 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_0: 184
    RECORDS_OUT_1_default.src2: 292
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -101,6 +121,11 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2: 84
    RECORDS_OUT_2_default.src3: 105
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
+Stage-2 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: from src
 insert into table src2 partition (value) select * where key < 100
 insert into table src3 partition (value) select * where key >= 100 and key < 300
@@ -118,6 +143,11 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2: 84
    RECORDS_OUT_2_default.src3: 208
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
+Stage-2 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 PREHOOK: query: drop table src2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@src2
@@ -145,6 +175,15 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 64
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   GROUPED_INPUT_SPLITS_Map_4: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_DIRECTORIES_Map_4: 1
+   INPUT_FILES_Map_1: 1
+   INPUT_FILES_Map_4: 1
+   RAW_INPUT_SPLITS_Map_1: 1
+   RAW_INPUT_SPLITS_Map_4: 1
 PREHOOK: query: insert into table src2 partition (value)
 select temps.* from (
   select * from src where key < 100
@@ -164,3 +203,12 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2: 292
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 127
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   GROUPED_INPUT_SPLITS_Map_4: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_DIRECTORIES_Map_4: 1
+   INPUT_FILES_Map_1: 1
+   INPUT_FILES_Map_4: 1
+   RAW_INPUT_SPLITS_Map_1: 1
+   RAW_INPUT_SPLITS_Map_4: 1

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
index 94e1ce2..aa2dcc7 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
@@ -270,6 +270,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2100
 PREHOOK: query: select count(*) from orc_ppd where t > 127
 PREHOOK: type: QUERY
@@ -284,6 +289,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t = 55
 PREHOOK: type: QUERY
@@ -311,6 +321,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 8
 PREHOOK: query: select count(*) from orc_ppd where t <=> 50
 PREHOOK: type: QUERY
@@ -336,6 +351,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 22
 PREHOOK: query: select count(*) from orc_ppd where t <=> 100
 PREHOOK: type: QUERY
@@ -361,6 +381,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 16
 PREHOOK: query: select count(*) from orc_ppd where t = "54"
 PREHOOK: type: QUERY
@@ -386,6 +411,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 18
 PREHOOK: query: select count(*) from orc_ppd where t = -10.0
 PREHOOK: type: QUERY
@@ -411,6 +441,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1
 PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as float)
 PREHOOK: type: QUERY
@@ -436,6 +471,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 32
 PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as double)
 PREHOOK: type: QUERY
@@ -461,6 +501,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 32
 PREHOOK: query: select count(*) from orc_ppd where t < 100
 PREHOOK: type: QUERY
@@ -486,6 +531,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1697
 PREHOOK: query: select count(*) from orc_ppd where t < 100 and t > 98
 PREHOOK: type: QUERY
@@ -511,6 +561,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 12
 PREHOOK: query: select count(*) from orc_ppd where t <= 100
 PREHOOK: type: QUERY
@@ -536,6 +591,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1713
 PREHOOK: query: select count(*) from orc_ppd where t is null
 PREHOOK: type: QUERY
@@ -561,6 +621,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where t in (5, 120)
 PREHOOK: type: QUERY
@@ -586,6 +651,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 50
 PREHOOK: query: select count(*) from orc_ppd where t between 60 and 80
 PREHOOK: type: QUERY
@@ -611,6 +681,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 318
 PREHOOK: query: select count(*) from orc_ppd where t = -100
 PREHOOK: type: QUERY
@@ -625,6 +700,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t <=> -100
 PREHOOK: type: QUERY
@@ -639,6 +719,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t = 125
 PREHOOK: type: QUERY
@@ -661,6 +746,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where t IN (-100, 125, 200)
 PREHOOK: type: QUERY
@@ -683,6 +773,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s > "zzz"
 PREHOOK: type: QUERY
@@ -697,6 +792,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where s = "zach young"
 PREHOOK: type: QUERY
@@ -724,6 +824,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s <=> "zach zipper"
 PREHOOK: type: QUERY
@@ -749,6 +854,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s <=> ""
 PREHOOK: type: QUERY
@@ -774,6 +884,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s is null
 PREHOOK: type: QUERY
@@ -788,6 +903,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where s is not null
 PREHOOK: type: QUERY
@@ -813,6 +933,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2100
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as char(50))
 PREHOOK: type: QUERY
@@ -835,6 +960,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as char(10))
 PREHOOK: type: QUERY
@@ -860,6 +990,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(10))
 PREHOOK: type: QUERY
@@ -885,6 +1020,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(50))
 PREHOOK: type: QUERY
@@ -910,6 +1050,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s < "b"
 PREHOOK: type: QUERY
@@ -935,6 +1080,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 81
 PREHOOK: query: select count(*) from orc_ppd where s > "alice" and s < "bob"
 PREHOOK: type: QUERY
@@ -960,6 +1110,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 74
 PREHOOK: query: select count(*) from orc_ppd where s in ("alice allen", "")
 PREHOOK: type: QUERY
@@ -985,6 +1140,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 12
 PREHOOK: query: select count(*) from orc_ppd where s between "" and "alice allen"
 PREHOOK: type: QUERY
@@ -1010,6 +1170,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 13
 PREHOOK: query: select count(*) from orc_ppd where s between "zz" and "zzz"
 PREHOOK: type: QUERY
@@ -1035,6 +1200,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1
 PREHOOK: query: select count(*) from orc_ppd where s between "zach zipper" and "zzz"
 PREHOOK: type: QUERY
@@ -1060,6 +1230,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 7
 PREHOOK: query: select count(*) from orc_ppd where s = "hello world"
 PREHOOK: type: QUERY
@@ -1082,6 +1257,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s <=> "apache hive"
 PREHOOK: type: QUERY
@@ -1104,6 +1284,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s IN ("a", "z")
 PREHOOK: type: QUERY
@@ -1126,6 +1311,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s = "sarah ovid"
 PREHOOK: type: QUERY
@@ -1151,6 +1341,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king"
 PREHOOK: type: QUERY
@@ -1176,6 +1371,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king" and t < 0
 PREHOOK: type: QUERY
@@ -1201,6 +1401,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king" and t > 100
 PREHOOK: type: QUERY
@@ -1226,4 +1431,9 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
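
Every hunk above adds the same new "Stage-1 INPUT COUNTERS" group to the counter dump: per-vertex totals for raw input splits, grouped splits, input files, and input directories. Note the zeroed variants (GROUPED_INPUT_SPLITS_Map_1: 0, INPUT_FILES_Map_1: 0, RAW_INPUT_SPLITS_Map_1: 0) for queries whose predicate prunes every split at generation time, while INPUT_DIRECTORIES_Map_1 stays at 1, presumably because the input directory is enumerated before splits are pruned. As a rough sketch only, and not the code from this patch, counters of this NAME_<vertexName> shape could be published through Tez's counter API roughly as follows; the "HIVE" group name and the helper class are assumptions for illustration:

    import org.apache.tez.common.counters.TezCounters;

    // Hypothetical helper, not from this commit: publishes split-generation
    // counters in the NAME_<vertex> form seen in the golden files above.
    public final class InputCounterSketch {
      private InputCounterSketch() {}

      public static void publish(TezCounters counters, String vertexName,
          long rawSplits, long groupedSplits, long files, long directories) {
        // "HIVE" is an assumed counter-group name for this sketch.
        counters.findCounter("HIVE", "RAW_INPUT_SPLITS_" + vertexName).setValue(rawSplits);
        counters.findCounter("HIVE", "GROUPED_INPUT_SPLITS_" + vertexName).setValue(groupedSplits);
        counters.findCounter("HIVE", "INPUT_FILES_" + vertexName).setValue(files);
        counters.findCounter("HIVE", "INPUT_DIRECTORIES_" + vertexName).setValue(directories);
      }
    }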

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
index e61917f..eb54a81 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
@@ -271,6 +271,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2094
 PREHOOK: query: select count(*) from orc_ppd where t > -100
 PREHOOK: type: QUERY
@@ -296,6 +301,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2094
 PREHOOK: query: DROP TABLE staging
 PREHOOK: type: DROPTABLE
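
The same five-line block recurs in each hunk of this file as well. For orientation, every printed group follows the pattern "Stage-1 <GROUP> COUNTERS:" followed by "   NAME: value" lines. A minimal sketch of that rendering, assuming Tez's iterable counters API; the formatting helper itself is illustrative, not the patch's actual printing code:

    import org.apache.tez.common.counters.CounterGroup;
    import org.apache.tez.common.counters.TezCounter;
    import org.apache.tez.common.counters.TezCounters;

    // Illustrative only: prints counter groups in the style of the
    // q.out files in this diff.
    public final class CounterPrinterSketch {
      private CounterPrinterSketch() {}

      public static void print(TezCounters counters) {
        // TezCounters is Iterable<CounterGroup>; each group is Iterable<TezCounter>.
        for (CounterGroup group : counters) {
          System.out.println("Stage-1 " + group.getDisplayName() + ":");
          for (TezCounter counter : group) {
            System.out.println("   " + counter.getName() + ": " + counter.getValue());
          }
        }
      }
    }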

http://git-wip-us.apache.org/repos/asf/hive/blob/fff86f3a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
index 7904661..c5302c3 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
@@ -223,6 +223,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2100
 PREHOOK: query: select count(*) from orc_ppd where t > 127
 PREHOOK: type: QUERY
@@ -237,6 +242,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t = 55
 PREHOOK: type: QUERY
@@ -264,6 +274,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 8
 PREHOOK: query: select count(*) from orc_ppd where t <=> 50
 PREHOOK: type: QUERY
@@ -289,6 +304,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 22
 PREHOOK: query: select count(*) from orc_ppd where t <=> 100
 PREHOOK: type: QUERY
@@ -314,6 +334,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 16
 PREHOOK: query: select count(*) from orc_ppd where t = "54"
 PREHOOK: type: QUERY
@@ -339,6 +364,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 18
 PREHOOK: query: select count(*) from orc_ppd where t = -10.0
 PREHOOK: type: QUERY
@@ -364,6 +394,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1
 PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as float)
 PREHOOK: type: QUERY
@@ -389,6 +424,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 32
 PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as double)
 PREHOOK: type: QUERY
@@ -414,6 +454,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 32
 PREHOOK: query: select count(*) from orc_ppd where t < 100
 PREHOOK: type: QUERY
@@ -439,6 +484,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1697
 PREHOOK: query: select count(*) from orc_ppd where t < 100 and t > 98
 PREHOOK: type: QUERY
@@ -464,6 +514,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 12
 PREHOOK: query: select count(*) from orc_ppd where t <= 100
 PREHOOK: type: QUERY
@@ -489,6 +544,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1713
 PREHOOK: query: select count(*) from orc_ppd where t is null
 PREHOOK: type: QUERY
@@ -514,6 +574,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where t in (5, 120)
 PREHOOK: type: QUERY
@@ -539,6 +604,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 50
 PREHOOK: query: select count(*) from orc_ppd where t between 60 and 80
 PREHOOK: type: QUERY
@@ -564,6 +634,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 318
 PREHOOK: query: select count(*) from orc_ppd where t = -100
 PREHOOK: type: QUERY
@@ -578,6 +653,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t <=> -100
 PREHOOK: type: QUERY
@@ -592,6 +672,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where t = 125
 PREHOOK: type: QUERY
@@ -614,6 +699,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where t IN (-100, 125, 200)
 PREHOOK: type: QUERY
@@ -636,6 +726,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s > "zzz"
 PREHOOK: type: QUERY
@@ -650,6 +745,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where s = "zach young"
 PREHOOK: type: QUERY
@@ -677,6 +777,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s <=> "zach zipper"
 PREHOOK: type: QUERY
@@ -702,6 +807,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s <=> ""
 PREHOOK: type: QUERY
@@ -727,6 +837,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s is null
 PREHOOK: type: QUERY
@@ -741,6 +856,11 @@ Stage-1 FILE SYSTEM COUNTERS:
 Stage-1 HIVE COUNTERS:
    CREATED_FILES: 1
    RECORDS_OUT_0: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 0
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 0
+   RAW_INPUT_SPLITS_Map_1: 0
 0
 PREHOOK: query: select count(*) from orc_ppd where s is not null
 PREHOOK: type: QUERY
@@ -766,6 +886,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2100
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as char(50))
 PREHOOK: type: QUERY
@@ -788,6 +913,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as char(10))
 PREHOOK: type: QUERY
@@ -813,6 +943,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(10))
 PREHOOK: type: QUERY
@@ -838,6 +973,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(50))
 PREHOOK: type: QUERY
@@ -863,6 +1003,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s < "b"
 PREHOOK: type: QUERY
@@ -888,6 +1033,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 81
 PREHOOK: query: select count(*) from orc_ppd where s > "alice" and s < "bob"
 PREHOOK: type: QUERY
@@ -913,6 +1063,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 74
 PREHOOK: query: select count(*) from orc_ppd where s in ("alice allen", "")
 PREHOOK: type: QUERY
@@ -938,6 +1093,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 12
 PREHOOK: query: select count(*) from orc_ppd where s between "" and "alice allen"
 PREHOOK: type: QUERY
@@ -963,6 +1123,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 2000
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 13
 PREHOOK: query: select count(*) from orc_ppd where s between "zz" and "zzz"
 PREHOOK: type: QUERY
@@ -988,6 +1153,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 1
 PREHOOK: query: select count(*) from orc_ppd where s between "zach zipper" and "zzz"
 PREHOOK: type: QUERY
@@ -1013,6 +1183,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 7
 PREHOOK: query: select count(*) from orc_ppd where s = "hello world"
 PREHOOK: type: QUERY
@@ -1035,6 +1210,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s <=> "apache hive"
 PREHOOK: type: QUERY
@@ -1057,6 +1237,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s IN ("a", "z")
 PREHOOK: type: QUERY
@@ -1079,6 +1264,11 @@ Stage-1 LLAP IO COUNTERS:
    CACHE_MISS_BYTES: 0
    METADATA_CACHE_HIT: 2
    SELECTED_ROWGROUPS: 0
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 0
 PREHOOK: query: select count(*) from orc_ppd where s = "sarah ovid"
 PREHOOK: type: QUERY
@@ -1104,6 +1294,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king"
 PREHOOK: type: QUERY
@@ -1129,6 +1324,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 2
    ROWS_EMITTED: 1100
    SELECTED_ROWGROUPS: 2
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 6
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king" and t < 0
 PREHOOK: type: QUERY
@@ -1154,6 +1354,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 1000
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where s = "wendy king" and t > 100
 PREHOOK: type: QUERY
@@ -1179,6 +1384,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where f=74.72
 PREHOOK: type: QUERY
@@ -1206,6 +1416,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where f=74.72
 PREHOOK: type: QUERY
@@ -1233,6 +1448,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where f=74.72
 PREHOOK: type: QUERY
@@ -1258,6 +1478,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 3
    ROWS_EMITTED: 2100
    SELECTED_ROWGROUPS: 3
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: select count(*) from orc_ppd where f=74.72
 PREHOOK: type: QUERY
@@ -1283,6 +1508,11 @@ Stage-1 LLAP IO COUNTERS:
    NUM_VECTOR_BATCHES: 1
    ROWS_EMITTED: 100
    SELECTED_ROWGROUPS: 1
+Stage-1 INPUT COUNTERS:
+   GROUPED_INPUT_SPLITS_Map_1: 1
+   INPUT_DIRECTORIES_Map_1: 1
+   INPUT_FILES_Map_1: 1
+   RAW_INPUT_SPLITS_Map_1: 1
 2
 PREHOOK: query: create temporary table tmp_orcppd
                     stored as orc