Posted to commits@hive.apache.org by pr...@apache.org on 2016/04/24 09:33:30 UTC

hive git commit: HIVE-13494: LLAP: Some metrics from daemon are not exposed to hadoop-metrics2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Repository: hive
Updated Branches:
  refs/heads/master 74197688f -> a42bc6746


HIVE-13494: LLAP: Some metrics from daemon are not exposed to hadoop-metrics2 (Prasanth Jayachandran reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a42bc674
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a42bc674
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a42bc674

Branch: refs/heads/master
Commit: a42bc67467db024e194773dd50f9cd303882fa82
Parents: 7419768
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Sun Apr 24 02:33:09 2016 -0500
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Sun Apr 24 02:33:09 2016 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 +-
 .../hive/common/util/FixedSizedObjectPool.java  |   5 +
 .../hive/llap/metrics/LlapMetricsSystem.java    |  57 ++++++++
 .../hadoop/hive/llap/metrics/MetricsUtils.java  |  43 ++++++
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |   3 +
 .../hive/llap/daemon/impl/LlapDaemon.java       |   5 +
 .../hive/llap/io/api/impl/LlapIoImpl.java       |  40 +++---
 .../llap/io/decode/EncodedDataConsumer.java     |  15 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |  12 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   6 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |   5 +-
 .../hive/llap/metrics/LlapDaemonCacheInfo.java  |   5 +-
 .../llap/metrics/LlapDaemonCacheMetrics.java    |  26 +++-
 .../llap/metrics/LlapDaemonExecutorInfo.java    |   6 +
 .../llap/metrics/LlapDaemonExecutorMetrics.java |  45 +++++-
 .../hive/llap/metrics/LlapDaemonIOInfo.java     |  53 +++++++
 .../hive/llap/metrics/LlapDaemonIOMetrics.java  | 144 +++++++++++++++++++
 .../hive/llap/metrics/LlapDaemonQueueInfo.java  |  50 -------
 .../llap/metrics/LlapDaemonQueueMetrics.java    | 116 ---------------
 .../hive/llap/metrics/LlapMetricsSystem.java    |  57 --------
 .../hadoop/hive/llap/metrics/MetricsUtils.java  |  43 ------
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |   8 +-
 .../org/apache/hadoop/hive/common/Pool.java     |   1 +
 23 files changed, 446 insertions(+), 306 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 926806b..5b5b350 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -295,6 +295,7 @@ public class HiveConf extends Configuration {
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
@@ -2622,10 +2623,10 @@ public class HiveConf extends Configuration {
         "Chooses whether query fragments will run in container or in llap"),
     LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
         "Cache objects (plans, hashtables, etc) in llap"),
-    LLAP_QUEUE_METRICS_PERCENTILE_INTERVALS("hive.llap.queue.metrics.percentiles.intervals", "",
+    LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS("hive.llap.io.decoding.metrics.percentiles.intervals", "30",
         "Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
-        "for percentile latency metrics on the LLAP daemon producer-consumer queue.\n" +
-        "By default, percentile latency metrics are disabled."),
+        "for percentile latency metrics on the LLAP daemon IO decoding time.\n" +
+        "hive.llap.queue.metrics.percentiles.intervals"),
     LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
         "Specify the number of threads to use for low-level IO thread pool."),
     LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "",

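For reference, a minimal sketch (not part of this patch) of how the renamed setting is populated and read; the setVar/getTrimmedStringsVar calls mirror the LlapIoImpl change further down, and the interval values are purely illustrative.

    import org.apache.hadoop.hive.conf.HiveConf;

    public class DecodingIntervalsConfigSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Comma-delimited rollover intervals in seconds; "30" is the new default.
        conf.setVar(HiveConf.ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS, "30,60,300");
        String[] intervals = HiveConf.getTrimmedStringsVar(conf,
            HiveConf.ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS);
        // intervals == {"30", "60", "300"}; LlapIoImpl parses these into ints below.
        System.out.println(String.join(",", intervals));
      }
    }
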
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/common/src/java/org/apache/hive/common/util/FixedSizedObjectPool.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/common/util/FixedSizedObjectPool.java b/common/src/java/org/apache/hive/common/util/FixedSizedObjectPool.java
index 600c443..3e80613 100644
--- a/common/src/java/org/apache/hive/common/util/FixedSizedObjectPool.java
+++ b/common/src/java/org/apache/hive/common/util/FixedSizedObjectPool.java
@@ -142,6 +142,11 @@ public class FixedSizedObjectPool<T> implements Pool<T> {
     tryOffer(t);
   }
 
+  @Override
+  public int size() {
+    return pool.length;
+  }
+
   @VisibleForTesting
   public boolean tryOffer(T t) {
     if (t == null || pool.length == 0) return false; // 0 size means no-pooling case - passthru.

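A minimal usage sketch (not from the patch) of what the new size() accessor reports; the StringBuilder pool is illustrative, and the helper callbacks follow the Pool.PoolObjectHelper usage shown in the EncodedDataConsumer hunk later in this patch.

    import org.apache.hadoop.hive.common.Pool;
    import org.apache.hive.common.util.FixedSizedObjectPool;

    public class PoolSizeSketch {
      public static void main(String[] args) {
        FixedSizedObjectPool<StringBuilder> pool = new FixedSizedObjectPool<>(128,
            new Pool.PoolObjectHelper<StringBuilder>() {
              @Override
              public StringBuilder create() {
                return new StringBuilder();
              }
              @Override
              public void resetBeforeOffer(StringBuilder sb) {
                sb.setLength(0); // reuse the buffer across take/offer cycles
              }
            });
        // size() reports the configured capacity (pool.length), not current occupancy,
        // which is what the new IO pool-size gauges in this patch expose.
        System.out.println("pool capacity: " + pool.size());
      }
    }
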
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
----------------------------------------------------------------------
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java b/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
new file mode 100644
index 0000000..710182d
--- /dev/null
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.metrics;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+
+/**
+ * Metrics system for llap daemon. We do not use DefaultMetricsSystem here to safeguard against
+ * Tez accidentally shutting it down.
+ */
+public enum LlapMetricsSystem  {
+  INSTANCE;
+
+  private AtomicReference<MetricsSystem> impl =
+      new AtomicReference<MetricsSystem>(new MetricsSystemImpl());
+
+  /**
+   * Convenience method to initialize the metrics system
+   * @param prefix  for the metrics system configuration
+   * @return the metrics system instance
+   */
+  public static MetricsSystem initialize(String prefix) {
+    return INSTANCE.impl.get().init(prefix);
+  }
+
+  /**
+   * @return the metrics system object
+   */
+  public static MetricsSystem instance() {
+    return INSTANCE.impl.get();
+  }
+
+  /**
+   * Shutdown the metrics system
+   */
+  public static void shutdown() {
+    INSTANCE.impl.get().shutdown();
+  }
+}

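A lifecycle sketch, not part of the patch; the "LlapDaemon" prefix is an assumption for illustration, and sources register against the returned system the way LlapDaemonIOMetrics.create does later in this patch.

    import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem;
    import org.apache.hadoop.metrics2.MetricsSystem;

    public class LlapMetricsLifecycleSketch {
      public static void main(String[] args) {
        // Initialize the daemon-owned metrics system; because it is separate from
        // DefaultMetricsSystem, a shutdown of the default system elsewhere in the
        // process does not tear these metrics down.
        MetricsSystem ms = LlapMetricsSystem.initialize("LlapDaemon"); // prefix is an assumption
        // ... metrics sources call ms.register(name, description, source) here ...
        LlapMetricsSystem.shutdown();
      }
    }
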
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
----------------------------------------------------------------------
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java b/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
new file mode 100644
index 0000000..c22ce4e
--- /dev/null
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.metrics;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.UUID;
+
+/**
+ * Utility methods for metrics system.
+ */
+public class MetricsUtils {
+  private static final String LOCALHOST = "localhost";
+  public static final String METRICS_PROCESS_NAME = "LlapDaemon";
+
+
+  public static String getHostName() {
+    try {
+      return InetAddress.getLocalHost().getHostName();
+    } catch (UnknownHostException e) {
+      return LOCALHOST;
+    }
+  }
+
+  public static String getUUID() {
+    return String.valueOf(UUID.randomUUID());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 824ff33..9f7e5c9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -113,6 +113,9 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
 
     this.metrics = metrics;
     metrics.incrAllocatedArena();
+    metrics.setArenaSize(arenaSize);
+    metrics.setMinAllocationSize(minAllocation);
+    metrics.setMaxAllocationSize(maxAllocation);
   }
 
   // TODO: would it make sense to return buffers asynchronously?

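The new gauges are plain setters on LlapDaemonCacheMetrics (extended later in this patch); a minimal sketch, not from the patch, using the same create() call LlapIoImpl uses and illustrative sizes.

    import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
    import org.apache.hadoop.hive.llap.metrics.MetricsUtils;

    public class CacheMetricsGaugeSketch {
      public static void main(String[] args) {
        String sessionId = MetricsUtils.getUUID();
        LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create(
            "LlapDaemonCacheMetrics-" + MetricsUtils.getHostName(), sessionId);
        // Illustrative sizes; BuddyAllocator sets these once from its constructor arguments.
        cacheMetrics.setArenaSize(128L * 1024 * 1024);
        cacheMetrics.setMinAllocationSize(256L * 1024);
        cacheMetrics.setMaxAllocationSize(16L * 1024 * 1024);
      }
    }
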
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index a0250cb..33b41e8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -194,6 +194,11 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
     String sessionId = MetricsUtils.getUUID();
     daemonConf.set("llap.daemon.metrics.sessionid", sessionId);
     this.metrics = LlapDaemonExecutorMetrics.create(displayName, sessionId, numExecutors);
+    this.metrics.setMemoryPerInstance(executorMemoryBytes);
+    this.metrics.setCacheMemoryPerInstance(ioMemoryBytes);
+    this.metrics.setJvmMaxMemory(maxJvmMemory);
+    this.metrics.setWaitQueueSize(waitQueueSize);
+    this.metrics.setRpcNumHandlers(numHandlers);
     metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
     this.llapDaemonInfoBean = MBeans.register("LlapDaemon", "LlapDaemonInfo", this);
     LOG.info("Started LlapMetricsSystem with displayName: " + displayName +

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index 36f8dec..d43ff15 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.hive.llap.io.api.impl;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import java.util.concurrent.Executors;
 
 import javax.management.ObjectName;
@@ -44,13 +47,14 @@ import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer;
 import org.apache.hadoop.hive.llap.io.decode.OrcColumnVectorProducer;
 import org.apache.hadoop.hive.llap.io.metadata.OrcMetadataCache;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
-import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
 import org.apache.hadoop.hive.llap.metrics.MetricsUtils;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.metrics2.util.MBeans;
 
+import com.google.common.primitives.Ints;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -66,7 +70,7 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
   private final ColumnVectorProducer cvp;
   private final ListeningExecutorService executor;
   private LlapDaemonCacheMetrics cacheMetrics;
-  private LlapDaemonQueueMetrics queueMetrics;
+  private LlapDaemonIOMetrics ioMetrics;
   private ObjectName buddyAllocatorMXBean;
   private Allocator allocator;
 
@@ -75,15 +79,25 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     boolean useLowLevelCache = LlapIoImpl.MODE_CACHE.equalsIgnoreCase(ioMode),
         useAllocOnly = !useLowLevelCache && LlapIoImpl.MODE_ALLOCATOR.equalsIgnoreCase(ioMode);
     LOG.info("Initializing LLAP IO in {} mode", ioMode);
-
     String displayName = "LlapDaemonCacheMetrics-" + MetricsUtils.getHostName();
     String sessionId = conf.get("llap.daemon.metrics.sessionid");
     this.cacheMetrics = LlapDaemonCacheMetrics.create(displayName, sessionId);
 
-    displayName = "LlapDaemonQueueMetrics-" + MetricsUtils.getHostName();
-    int[] intervals = conf.getInts(String.valueOf(
-        HiveConf.ConfVars.LLAP_QUEUE_METRICS_PERCENTILE_INTERVALS));
-    this.queueMetrics = LlapDaemonQueueMetrics.create(displayName, sessionId, intervals);
+    displayName = "LlapDaemonIOMetrics-" + MetricsUtils.getHostName();
+    String[] strIntervals = HiveConf.getTrimmedStringsVar(conf,
+        HiveConf.ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS);
+    List<Integer> intervalList = new ArrayList<>();
+    if (strIntervals != null) {
+      for (String strInterval : strIntervals) {
+        try {
+          intervalList.add(Integer.valueOf(strInterval));
+        } catch (NumberFormatException e) {
+          LOG.warn("Ignoring IO decoding metrics interval {} from {} as it is invalid", strInterval,
+              Arrays.toString(strIntervals));
+        }
+      }
+    }
+    this.ioMetrics = LlapDaemonIOMetrics.create(displayName, sessionId, Ints.toArray(intervalList));
 
     LOG.info("Started llap daemon metrics with displayName: {} sessionId: {}", displayName,
         sessionId);
@@ -123,10 +137,10 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     int numThreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_IO_THREADPOOL_SIZE);
     executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads,
         new ThreadFactoryBuilder().setNameFormat("IO-Elevator-Thread-%d").setDaemon(true).build()));
-
+    ioMetrics.setIoThreadPoolSize(numThreads);
     // TODO: this should depends on input format and be in a map, or something.
     this.cvp = new OrcColumnVectorProducer(
-        metadataCache, orcCache, bufferManager, conf, cacheMetrics, queueMetrics);
+        metadataCache, orcCache, bufferManager, conf, cacheMetrics, ioMetrics);
     LOG.info("LLAP IO initialized");
 
     registerMXBeans();
@@ -143,14 +157,6 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     return new LlapInputFormat(sourceInputFormat, cvp, executor);
   }
 
-  public LlapDaemonCacheMetrics getCacheMetrics() {
-    return cacheMetrics;
-  }
-
-  public LlapDaemonQueueMetrics getQueueMetrics() {
-    return queueMetrics;
-  }
-
   @Override
   public void close() {
     LOG.info("Closing LlapIoImpl..");

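Putting the IO metrics pieces together, a sketch that is not part of the patch: the interval array is what the parsing loop above produces, the 30-second interval and latency value are illustrative, and LlapMetricsSystem is assumed to have been initialized by the daemon.

    import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
    import org.apache.hadoop.hive.llap.metrics.MetricsUtils;

    public class IoMetricsWiringSketch {
      public static void main(String[] args) {
        String sessionId = MetricsUtils.getUUID();
        LlapDaemonIOMetrics ioMetrics = LlapDaemonIOMetrics.create(
            "LlapDaemonIOMetrics-" + MetricsUtils.getHostName(), sessionId, new int[] {30});
        ioMetrics.setIoThreadPoolSize(10);  // the default of hive.llap.io.threadpool.size
        ioMetrics.addDecodeBatchTime(42L);  // one decode latency sample, in milliseconds
      }
    }
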
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
index 137acb0..0ba7c09 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.common.Pool;
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch;
 import org.apache.hadoop.hive.llap.ConsumerFeedback;
 import org.apache.hadoop.hive.llap.io.api.impl.ColumnVectorBatch;
-import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 import org.apache.hive.common.util.FixedSizedObjectPool;
 
@@ -33,15 +33,15 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
   private ConsumerFeedback<BatchType> upstreamFeedback;
   private final Consumer<ColumnVectorBatch> downstreamConsumer;
   private Callable<Void> readCallable;
-  private final LlapDaemonQueueMetrics queueMetrics;
+  private final LlapDaemonIOMetrics ioMetrics;
   // Note that the pool is per EDC - within EDC, CVBs are expected to have the same schema.
   private final static int CVB_POOL_SIZE = 128;
   protected final FixedSizedObjectPool<ColumnVectorBatch> cvbPool;
 
   public EncodedDataConsumer(Consumer<ColumnVectorBatch> consumer, final int colCount,
-      LlapDaemonQueueMetrics queueMetrics) {
+      LlapDaemonIOMetrics ioMetrics) {
     this.downstreamConsumer = consumer;
-    this.queueMetrics = queueMetrics;
+    this.ioMetrics = ioMetrics;
     cvbPool = new FixedSizedObjectPool<ColumnVectorBatch>(CVB_POOL_SIZE,
         new Pool.PoolObjectHelper<ColumnVectorBatch>() {
           @Override
@@ -53,6 +53,7 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
             // Don't reset anything, we are reusing column vectors.
           }
         });
+    this.ioMetrics.setColumnVectorBatchPoolSize(cvbPool.size());
   }
 
   public void init(ConsumerFeedback<BatchType> upstreamFeedback,
@@ -61,6 +62,10 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
     this.readCallable = readCallable;
   }
 
+  public LlapDaemonIOMetrics getIOMetrics() {
+    return ioMetrics;
+  }
+
   @Override
   public Callable<Void> getReadCallable() {
     return readCallable;
@@ -75,7 +80,7 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
     long start = System.currentTimeMillis();
     decodeBatch(data, downstreamConsumer);
     long end = System.currentTimeMillis();
-    queueMetrics.addProcessingTime(end - start);
+    ioMetrics.addDecodeBatchTime(end - start);
     returnSourceData(data);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
index 024c485..7db519c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.io.encoded.OrcEncodedDataReader;
 import org.apache.hadoop.hive.llap.io.metadata.OrcMetadataCache;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
-import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.mapred.FileSplit;
@@ -44,11 +44,11 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
   private final Configuration conf;
   private boolean _skipCorrupt; // TODO: get rid of this
   private LlapDaemonCacheMetrics cacheMetrics;
-  private LlapDaemonQueueMetrics queueMetrics;
+  private LlapDaemonIOMetrics ioMetrics;
 
   public OrcColumnVectorProducer(OrcMetadataCache metadataCache,
       LowLevelCacheImpl lowLevelCache, BufferUsageManager bufferManager,
-      Configuration conf, LlapDaemonCacheMetrics metrics, LlapDaemonQueueMetrics queueMetrics) {
+      Configuration conf, LlapDaemonCacheMetrics cacheMetrics, LlapDaemonIOMetrics ioMetrics) {
     LlapIoImpl.LOG.info("Initializing ORC column vector producer");
 
     this.metadataCache = metadataCache;
@@ -56,8 +56,8 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
     this.bufferManager = bufferManager;
     this.conf = conf;
     this._skipCorrupt = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA);
-    this.cacheMetrics = metrics;
-    this.queueMetrics = queueMetrics;
+    this.cacheMetrics = cacheMetrics;
+    this.ioMetrics = ioMetrics;
   }
 
   @Override
@@ -67,7 +67,7 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
       QueryFragmentCounters counters) {
     cacheMetrics.incrCacheReadRequests();
     OrcEncodedDataConsumer edc = new OrcEncodedDataConsumer(consumer, columnIds.size(),
-        _skipCorrupt, counters, queueMetrics);
+        _skipCorrupt, counters, ioMetrics);
     OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache, bufferManager,
         metadataCache, conf, split, columnIds, sarg, columnNames, edc, counters);
     edc.init(reader, reader);

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
index 7ee263d..0651557 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
 import org.apache.hadoop.hive.llap.io.api.impl.ColumnVectorBatch;
 import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
 import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
-import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.orc.CompressionCodec;
@@ -52,8 +52,8 @@ public class OrcEncodedDataConsumer
 
   public OrcEncodedDataConsumer(
       Consumer<ColumnVectorBatch> consumer, int colCount, boolean skipCorrupt,
-      QueryFragmentCounters counters, LlapDaemonQueueMetrics queueMetrics) {
-    super(consumer, colCount, queueMetrics);
+      QueryFragmentCounters counters, LlapDaemonIOMetrics ioMetrics) {
+    super(consumer, colCount, ioMetrics);
     // TODO: get rid of this
     this.skipCorrupt = skipCorrupt;
     this.counters = counters;

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index fb0867d..eb953c7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -26,6 +26,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -66,7 +67,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcSplit;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader;
 import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl;
 import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.SargApplier;
-import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedOrcFile;
 import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedReader;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
@@ -180,6 +180,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
+    LlapDaemonIOMetrics ioMetrics = consumer.getIOMetrics();
+    ioMetrics.setColumnStreamDataPoolSize(CSD_POOL.size());
+    ioMetrics.setEncodedColumnBatchPoolSize(ECB_POOL.size());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
index 8f0e9d8..191345e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
@@ -34,7 +34,10 @@ public enum LlapDaemonCacheInfo implements MetricsInfo {
   CacheHitRatio("Ratio of disk ranges cached vs requested"),
   CacheReadRequests("Number of disk range requests to cache"),
   CacheAllocatedArena("Number of arenas allocated"),
-  CacheNumLockedBuffers("Number of locked buffers in cache");
+  CacheNumLockedBuffers("Number of locked buffers in cache"),
+  CacheArenaSize("Size of arena used by allocator"),
+  CacheMinAllocationSize("Minimum allocation size used by allocator"),
+  CacheMaxAllocationSize("Maximum allocation size used by allocator");
 
   private final String desc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
index b89c6c4..bb76da5 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
@@ -18,11 +18,14 @@
 package org.apache.hadoop.hive.llap.metrics;
 
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheAllocatedArena;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheArenaSize;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheCapacityRemaining;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheCapacityTotal;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheCapacityUsed;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheHitBytes;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheHitRatio;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheMaxAllocationSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheMinAllocationSize;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheNumLockedBuffers;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheReadRequests;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheInfo.CacheRequestedBytes;
@@ -65,6 +68,12 @@ public class LlapDaemonCacheMetrics implements MetricsSource {
   MutableCounterLong cacheAllocatedArena;
   @Metric
   MutableCounterLong cacheNumLockedBuffers;
+  @Metric
+  MutableGaugeLong arenaSize;
+  @Metric
+  MutableGaugeLong minAllocationSize;
+  @Metric
+  MutableGaugeLong maxAllocationSize;
 
   private LlapDaemonCacheMetrics(String name, String sessionId) {
     this.name = name;
@@ -106,6 +115,18 @@ public class LlapDaemonCacheMetrics implements MetricsSource {
     cacheNumLockedBuffers.incr();
   }
 
+  public void setArenaSize(long value) {
+    arenaSize.set(value);
+  }
+
+  public void setMinAllocationSize(long value) {
+    minAllocationSize.set(value);
+  }
+
+  public void setMaxAllocationSize(long value) {
+    maxAllocationSize.set(value);
+  }
+
   public void decrCacheNumLockedBuffers() {
     cacheNumLockedBuffers.incr(-1);
   }
@@ -145,7 +166,10 @@ public class LlapDaemonCacheMetrics implements MetricsSource {
         .addCounter(CacheHitBytes, cacheHitBytes.value())
         .addCounter(CacheAllocatedArena, cacheAllocatedArena.value())
         .addCounter(CacheNumLockedBuffers, cacheNumLockedBuffers.value())
-        .addGauge(CacheHitRatio, cacheHitRatio);
+        .addGauge(CacheHitRatio, cacheHitRatio)
+        .addGauge(CacheArenaSize, arenaSize.value())
+        .addGauge(CacheMinAllocationSize, minAllocationSize.value())
+        .addGauge(CacheMaxAllocationSize, maxAllocationSize.value());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
index e4739dc..941d926 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
@@ -26,6 +26,12 @@ import com.google.common.base.Objects;
  */
 public enum LlapDaemonExecutorInfo implements MetricsInfo {
   ExecutorMetrics("Llap daemon cache related metrics"),
+  ExecutorThreadCountPerInstance("Total number of executor threads per node"),
+  ExecutorMemoryPerInstance("Total memory for executors per node in bytes"),
+  ExecutorCacheMemoryPerInstance("Total Cache memory per node in bytes"),
+  ExecutorJvmMaxMemory("Max memory available for JVM in bytes"),
+  ExecutorWaitQueueSize("Size of wait queue per node"),
+  ExecutorRpcNumHandlers("Number of RPC handlers per node"),
   ExecutorThreadCPUTime("Cpu time in nanoseconds"),
   ExecutorThreadUserTime("User time in nanoseconds"),
   ExecutorTotalRequestsHandled("Total number of requests handled by the container"),

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
index 7919200..894880f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
@@ -17,8 +17,13 @@
  */
 package org.apache.hadoop.hive.llap.metrics;
 
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorCacheMemoryPerInstance;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorJvmMaxMemory;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorMemoryPerInstance;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorNumQueuedRequests;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorRpcNumHandlers;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorThreadCPUTime;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorThreadCountPerInstance;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorThreadUserTime;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorTotalAskedToDie;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorTotalExecutionFailure;
@@ -26,6 +31,7 @@ import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.Executo
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorTotalRequestsHandled;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorTotalSuccess;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorMetrics;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.ExecutorWaitQueueSize;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorInfo.PreemptionTimeLost;
 import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
 import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
@@ -46,6 +52,7 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
@@ -78,7 +85,16 @@ public class LlapDaemonExecutorMetrics implements MetricsSource {
   MutableCounterLong executorTotalExecutionFailed;
   @Metric
   MutableCounterLong preemptionTimeLost;
-
+  @Metric
+  MutableGaugeLong cacheMemoryPerInstance;
+  @Metric
+  MutableGaugeLong memoryPerInstance;
+  @Metric
+  MutableGaugeLong jvmMaxMemory;
+  @Metric
+  MutableGaugeInt waitQueueSize;
+  @Metric
+  MutableGaugeInt rpcNumHandlers;
 
   private LlapDaemonExecutorMetrics(String displayName, JvmMetrics jm, String sessionId,
       int numExecutors) {
@@ -151,6 +167,25 @@ public class LlapDaemonExecutorMetrics implements MetricsSource {
     executorTotalIKilled.incr();
   }
 
+  public void setCacheMemoryPerInstance(long value) {
+    cacheMemoryPerInstance.set(value);
+  }
+
+  public void setMemoryPerInstance(long value) {
+    memoryPerInstance.set(value);
+  }
+
+  public void setJvmMaxMemory(long value) {
+    jvmMaxMemory.set(value);
+  }
+
+  public void setWaitQueueSize(int size) {
+    waitQueueSize.set(size);
+  }
+
+  public void setRpcNumHandlers(int numHandlers) {
+    rpcNumHandlers.set(numHandlers);
+  }
 
   private void getExecutorStats(MetricsRecordBuilder rb) {
     updateThreadMetrics(rb);
@@ -160,7 +195,13 @@ public class LlapDaemonExecutorMetrics implements MetricsSource {
         .addCounter(ExecutorTotalSuccess, executorTotalSuccess.value())
         .addCounter(ExecutorTotalExecutionFailure, executorTotalExecutionFailed.value())
         .addCounter(ExecutorTotalInterrupted, executorTotalIKilled.value())
-        .addCounter(PreemptionTimeLost, preemptionTimeLost.value());
+        .addCounter(PreemptionTimeLost, preemptionTimeLost.value())
+        .addGauge(ExecutorThreadCountPerInstance, numExecutors)
+        .addGauge(ExecutorMemoryPerInstance, memoryPerInstance.value())
+        .addGauge(ExecutorCacheMemoryPerInstance, cacheMemoryPerInstance.value())
+        .addGauge(ExecutorJvmMaxMemory, jvmMaxMemory.value())
+        .addGauge(ExecutorWaitQueueSize, waitQueueSize.value())
+        .addGauge(ExecutorRpcNumHandlers, rpcNumHandlers.value());
   }
 
   private void updateThreadMetrics(MetricsRecordBuilder rb) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
new file mode 100644
index 0000000..79f004b
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.metrics;
+
+import org.apache.hadoop.metrics2.MetricsInfo;
+
+import com.google.common.base.Objects;
+
+/**
+ * Llap daemon I/O elevator metrics
+ */
+public enum LlapDaemonIOInfo implements MetricsInfo {
+  IOMetrics("Llap daemon I/O elevator metrics"),
+  IoThreadPoolSize("Size of the thread pool used by IO elevator"),
+  EncodedColumnBatchPoolSize("Size of the object pool that stores encoded column batches"),
+  ColumnStreamDataPoolSize("Size of the object pool that stores column stream data"),
+  ColumnVectorBatchPoolSize("Size of the object pool that stores column vector batches"),
+  PercentileDecodingTime("Percentile decoding time for encoded column batch"),
+  MaxDecodingTime("Max time for decoding an encoded column batch");
+
+  private final String desc;
+
+  LlapDaemonIOInfo(String desc) {
+    this.desc = desc;
+  }
+
+  @Override
+  public String description() {
+    return desc;
+  }
+
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this)
+        .add("name", name()).add("description", desc)
+        .toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOMetrics.java
new file mode 100644
index 0000000..f3def75
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOMetrics.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.metrics;
+
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.ColumnStreamDataPoolSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.ColumnVectorBatchPoolSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.EncodedColumnBatchPoolSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.IoThreadPoolSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.MaxDecodingTime;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonIOInfo.IOMetrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Metrics about the LLAP daemon IO elevator: decode times and object pool sizes.
+ */
+@Metrics(about = "LlapDaemon IO Metrics", context = "io")
+public class LlapDaemonIOMetrics implements MetricsSource {
+  protected static final Logger LOG = LoggerFactory.getLogger(LlapDaemonIOMetrics.class);
+  private final String name;
+  private final String sessionId;
+  private final MetricsRegistry registry;
+  private long maxTime = Long.MIN_VALUE;
+
+  @Metric
+  MutableGaugeInt encodedColumnBatchPoolSize;
+  @Metric
+  MutableGaugeInt columnStreamDataPoolSize;
+  @Metric
+  MutableGaugeInt columnVectorBatchPool;
+  @Metric
+  MutableGaugeInt ioThreadPoolSize;
+  @Metric
+  MutableRate rateOfDecoding;
+  final MutableQuantiles[] decodingTimes;
+  @Metric
+  MutableGaugeLong maxDecodingTime;
+
+  private LlapDaemonIOMetrics(String displayName, String sessionId, int[] intervals) {
+    this.name = displayName;
+    this.sessionId = sessionId;
+    this.registry = new MetricsRegistry("LlapDaemonIORegistry");
+    this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
+
+    final int len = intervals == null ? 0 : intervals.length;
+    this.decodingTimes = new MutableQuantiles[len];
+    for (int i=0; i<len; i++) {
+      int interval = intervals[i];
+      LOG.info("Created interval " + LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + interval + "s");
+      decodingTimes[i] = registry.newQuantiles(
+          LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + interval + "s",
+          LlapDaemonIOInfo.PercentileDecodingTime.description(),
+          "ops", "latency", interval);
+    }
+  }
+
+  public static LlapDaemonIOMetrics create(String displayName, String sessionId, int[] intervals) {
+    MetricsSystem ms = LlapMetricsSystem.instance();
+    return ms.register(displayName, null, new LlapDaemonIOMetrics(displayName, sessionId, intervals));
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean b) {
+    MetricsRecordBuilder rb = collector.addRecord(IOMetrics)
+        .setContext("io")
+        .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME)
+        .tag(SessionId, sessionId);
+    getIoStats(rb);
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setEncodedColumnBatchPoolSize(int size) {
+    encodedColumnBatchPoolSize.set(size);
+  }
+
+  public void setColumnStreamDataPoolSize(int size) {
+    columnStreamDataPoolSize.set(size);
+  }
+
+  public void setColumnVectorBatchPoolSize(int size) {
+    columnVectorBatchPool.set(size);
+  }
+
+  public void setIoThreadPoolSize(int size) {
+    ioThreadPoolSize.set(size);
+  }
+
+  public void addDecodeBatchTime(long latency) {
+    rateOfDecoding.add(latency);
+    if (latency > maxTime) {
+      maxTime = latency;
+      maxDecodingTime.set(maxTime);
+    }
+    for (MutableQuantiles q : decodingTimes) {
+      q.add(latency);
+    }
+  }
+
+  private void getIoStats(MetricsRecordBuilder rb) {
+    rb.addGauge(EncodedColumnBatchPoolSize, encodedColumnBatchPoolSize.value())
+        .addGauge(ColumnStreamDataPoolSize, columnStreamDataPoolSize.value())
+        .addGauge(ColumnVectorBatchPoolSize, columnVectorBatchPool.value())
+        .addGauge(IoThreadPoolSize, ioThreadPoolSize.value())
+        .addGauge(MaxDecodingTime, maxDecodingTime.value());
+    rateOfDecoding.snapshot(rb, true);
+
+    for (MutableQuantiles q : decodingTimes) {
+      q.snapshot(rb, true);
+    }
+  }
+
+}

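The percentile machinery above boils down to hadoop-metrics2 MutableQuantiles; a standalone sketch with an illustrative 30-second interval and sample values (the registry and metric names here are made up for the example).

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableQuantiles;

    public class QuantilesSketch {
      public static void main(String[] args) {
        MetricsRegistry registry = new MetricsRegistry("ExampleIORegistry");
        MutableQuantiles decodingTime = registry.newQuantiles(
            "PercentileDecodingTime_30s", "Percentile decoding time", "ops", "latency", 30);
        // Each decode latency (ms) is added as a sample; the estimator rolls over and
        // re-publishes the 50/75/90/95/99th percentiles every 30 seconds.
        decodingTime.add(12L);
        decodingTime.add(48L);
      }
    }
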
http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueInfo.java
deleted file mode 100644
index 7df7877..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueInfo.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.llap.metrics;
-
-import org.apache.hadoop.metrics2.MetricsInfo;
-
-import com.google.common.base.Objects;
-
-/**
- * Llap daemon producer / consumer queue related metrics.
- */
-public enum LlapDaemonQueueInfo implements MetricsInfo {
-  QueueMetrics("Llap daemon producer/consumer queue related metrics"),
-  QueueSize("Size of the queue used by producer and consumer"),
-  PercentileProcessingTime("Percentiles processing time for an element from queue"),
-  MaxProcessingTime("Max processing time for an element from queue so far");
-
-  private final String desc;
-
-  LlapDaemonQueueInfo(String desc) {
-    this.desc = desc;
-  }
-
-  @Override
-  public String description() {
-    return desc;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this)
-        .add("name", name()).add("description", desc)
-        .toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueMetrics.java
deleted file mode 100644
index 10a0124..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonQueueMetrics.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.llap.metrics;
-
-import static org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueInfo.MaxProcessingTime;
-import static org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueInfo.QueueMetrics;
-import static org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueInfo.QueueSize;
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MutableQuantiles;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- *
- */
-@Metrics(about = "LlapDaemon Queue Metrics", context = "queue")
-public class LlapDaemonQueueMetrics implements MetricsSource {
-  private final String name;
-  private final String sessionId;
-  private final MetricsRegistry registry;
-  private long maxTime = Long.MIN_VALUE;
-
-  @Metric
-  MutableGaugeInt queueSize;
-  @Metric
-  MutableRate rateOfProcessing;
-  final MutableQuantiles[] processingTimes;
-  @Metric
-  MutableGaugeLong maxProcessingTime;
-
-  private LlapDaemonQueueMetrics(String displayName, String sessionId, int[] intervals) {
-    this.name = displayName;
-    this.sessionId = sessionId;
-    this.registry = new MetricsRegistry("LlapDaemonQueueRegistry");
-    this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
-
-    final int len = intervals == null ? 0 : intervals.length;
-    this.processingTimes = new MutableQuantiles[len];
-    for (int i=0; i<len; i++) {
-      int interval = intervals[i];
-      processingTimes[i] = registry.newQuantiles(
-          LlapDaemonQueueInfo.PercentileProcessingTime.name() + "_" + interval + "s",
-          LlapDaemonQueueInfo.PercentileProcessingTime.description(),
-          "ops", "latency", interval);
-    }
-  }
-
-  public static LlapDaemonQueueMetrics create(String displayName, String sessionId, int[] intervals) {
-    MetricsSystem ms = LlapMetricsSystem.instance();
-    return ms.register(displayName, null, new LlapDaemonQueueMetrics(displayName, sessionId, intervals));
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean b) {
-    MetricsRecordBuilder rb = collector.addRecord(QueueMetrics)
-        .setContext("queue")
-        .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME)
-        .tag(SessionId, sessionId);
-    getQueueStats(rb);
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public void setQueueSize(int size) {
-    queueSize.set(size);
-  }
-
-  public void addProcessingTime(long latency) {
-    rateOfProcessing.add(latency);
-    if (latency > maxTime) {
-      maxTime = latency;
-      maxProcessingTime.set(maxTime);
-    }
-    for (MutableQuantiles q : processingTimes) {
-      q.add(latency);
-    }
-  }
-
-  private void getQueueStats(MetricsRecordBuilder rb) {
-    rb.addGauge(QueueSize, queueSize.value())
-        .addGauge(MaxProcessingTime, maxProcessingTime.value())
-        .addGauge(MaxProcessingTime, maxProcessingTime.value());
-    rateOfProcessing.snapshot(rb, true);
-
-    for (MutableQuantiles q : processingTimes) {
-      q.snapshot(rb, true);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
deleted file mode 100644
index 710182d..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapMetricsSystem.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.llap.metrics;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
-
-/**
- * Metrics system for llap daemon. We do not use DefaultMetricsSystem here to safegaurd against
- * Tez accidentally shutting it down.
- */
-public enum LlapMetricsSystem  {
-  INSTANCE;
-
-  private AtomicReference<MetricsSystem> impl =
-      new AtomicReference<MetricsSystem>(new MetricsSystemImpl());
-
-  /**
-   * Convenience method to initialize the metrics system
-   * @param prefix  for the metrics system configuration
-   * @return the metrics system instance
-   */
-  public static MetricsSystem initialize(String prefix) {
-    return INSTANCE.impl.get().init(prefix);
-  }
-
-  /**
-   * @return the metrics system object
-   */
-  public static MetricsSystem instance() {
-    return INSTANCE.impl.get();
-  }
-
-  /**
-   * Shutdown the metrics system
-   */
-  public static void shutdown() {
-    INSTANCE.impl.get().shutdown();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
deleted file mode 100644
index c22ce4e..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/MetricsUtils.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.llap.metrics;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.UUID;
-
-/**
- * Utility methods for metrics system.
- */
-public class MetricsUtils {
-  private static final String LOCALHOST = "localhost";
-  public static final String METRICS_PROCESS_NAME = "LlapDaemon";
-
-
-  public static String getHostName() {
-    try {
-      return InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      return LOCALHOST;
-    }
-  }
-
-  public static String getUUID() {
-    return String.valueOf(UUID.randomUUID());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index f4cfa53..8ccedb7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -1317,10 +1317,16 @@ class EncodedReaderImpl implements EncodedReader {
   /** Pool factory that is used if another one isn't specified - just creates the objects. */
   private static class NoopPoolFactory implements PoolFactory {
     @Override
-    public <T> Pool<T> createPool(int size, final PoolObjectHelper<T> helper) {
+    public <T> Pool<T> createPool(final int size, final PoolObjectHelper<T> helper) {
       return new Pool<T>() {
         public void offer(T t) {
         }
+
+        @Override
+        public int size() {
+          return size;
+        }
+
         public T take() {
           return helper.create();
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/a42bc674/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java b/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
index e41a515..272bbdd 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
@@ -29,4 +29,5 @@ public interface Pool<T> {
 
   T take();
   void offer(T t);
+  int size();
 }
\ No newline at end of file
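
For implementers outside this patch, a minimal sketch of the extended interface; the single-slot pool here is illustrative only (NoopPoolFactory above and FixedSizedObjectPool are the in-tree implementations), and the helper callbacks follow the Pool.PoolObjectHelper usage shown in EncodedDataConsumer above.

    import org.apache.hadoop.hive.common.Pool;

    // A single-slot pool, just to show the three methods the interface now requires.
    public class SingleSlotPool<T> implements Pool<T> {
      private final Pool.PoolObjectHelper<T> helper;
      private T slot;

      public SingleSlotPool(Pool.PoolObjectHelper<T> helper) {
        this.helper = helper;
      }

      @Override
      public synchronized T take() {
        if (slot == null) {
          return helper.create(); // fall back to creating a fresh object
        }
        T result = slot;
        slot = null;
        return result;
      }

      @Override
      public synchronized void offer(T t) {
        if (slot == null && t != null) {
          helper.resetBeforeOffer(t);
          slot = t;
        }
      }

      @Override
      public int size() {
        return 1; // capacity, matching how FixedSizedObjectPool reports pool.length
      }
    }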