Posted to commits@impala.apache.org by kw...@apache.org on 2016/12/01 18:53:10 UTC

[1/5] incubator-impala git commit: Bracketing Java logging output with log level checks.

Repository: incubator-impala
Updated Branches:
  refs/heads/master 5a158dbcd -> b1edca2a5


Bracketing Java logging output with log level checks.

This reduces the creation of intermediate objects (chiefly the concatenated
message strings) when the corresponding log level is disabled, improving
performance.

Change-Id: Ie0f5123dbf2caf3b03183c76820599920baa9785
Reviewed-on: http://gerrit.cloudera.org:8080/5284
Reviewed-by: Marcel Kornacker <ma...@cloudera.com>
Tested-by: Internal Jenkins
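
For context, the pattern applied throughout the diff below is the standard
log-guard idiom. A minimal before/after sketch, assuming an SLF4J logger
(the "{}" placeholders in ExternalDataSourceExecutor.java suggest SLF4J;
Log4j's Logger offers the same isTraceEnabled()/isDebugEnabled() methods).
The class, method, and parameter names here are illustrative, not taken
from the patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogGuardSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogGuardSketch.class);

      void addPrivilegeTrace(String privilegeName, int roleId) {
        // Before: the message is concatenated (allocating StringBuilder
        // and String objects) even when TRACE is disabled and the result
        // is thrown away.
        LOG.trace("Adding privilege: " + privilegeName +
            " role ID: " + roleId);

        // After: the guard makes the concatenation conditional, so the
        // intermediate objects exist only when TRACE is enabled.
        if (LOG.isTraceEnabled()) {
          LOG.trace("Adding privilege: " + privilegeName +
              " role ID: " + roleId);
        }
      }
    }

Beyond adding guards, the commit also adjusts a few severities: several
per-table INFO messages are demoted to DEBUG or TRACE (e.g. in
CatalogServiceCatalog.java, TableLoader.java, and ParallelPlanner.java),
and one swallowed exception in PartitionStatsUtil.java is promoted from
INFO to ERROR.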


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/352833b8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/352833b8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/352833b8

Branch: refs/heads/master
Commit: 352833b8cfcf5e0246f322fec1ee9b7612e0ed6a
Parents: 5a158db
Author: Marcel Kornacker <ma...@cloudera.com>
Authored: Wed Nov 30 12:51:20 2016 -0800
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Dec 1 04:42:38 2016 +0000

----------------------------------------------------------------------
 .../impala/catalog/AuthorizationPolicy.java     | 22 ++++++---
 .../impala/catalog/CatalogServiceCatalog.java   | 34 +++++++++----
 .../java/org/apache/impala/catalog/Column.java  |  4 +-
 .../apache/impala/catalog/DataSourceTable.java  |  4 +-
 .../org/apache/impala/catalog/HdfsTable.java    | 50 +++++++++++++------
 .../apache/impala/catalog/ImpaladCatalog.java   | 16 ++++--
 .../impala/catalog/MetaStoreClientPool.java     |  4 +-
 .../impala/catalog/PartitionStatsUtil.java      |  2 +-
 .../java/org/apache/impala/catalog/Table.java   |  2 +-
 .../org/apache/impala/catalog/TableLoader.java  |  2 +-
 .../apache/impala/common/FileSystemUtil.java    | 26 ++++++----
 .../ExternalDataSourceExecutor.java             |  4 +-
 .../apache/impala/planner/AggregationNode.java  | 12 +++--
 .../apache/impala/planner/AnalyticEvalNode.java |  6 ++-
 .../apache/impala/planner/AnalyticPlanner.java  | 16 ++++--
 .../impala/planner/DataSourceScanNode.java      |  4 +-
 .../impala/planner/DistributedPlanner.java      | 30 +++++++-----
 .../apache/impala/planner/HBaseScanNode.java    |  8 ++-
 .../impala/planner/HdfsPartitionFilter.java     |  6 ++-
 .../org/apache/impala/planner/HdfsScanNode.java | 28 +++++++----
 .../org/apache/impala/planner/JoinNode.java     |  4 +-
 .../org/apache/impala/planner/KuduScanNode.java |  4 +-
 .../apache/impala/planner/ParallelPlanner.java  | 15 ++----
 .../java/org/apache/impala/planner/Planner.java | 16 +++---
 .../impala/planner/RuntimeFilterGenerator.java  |  6 ++-
 .../org/apache/impala/planner/SelectNode.java   |  4 +-
 .../impala/planner/SingleNodePlanner.java       | 32 ++++++++----
 .../org/apache/impala/planner/SortNode.java     | 12 +++--
 .../org/apache/impala/planner/UnionNode.java    |  4 +-
 .../impala/service/CatalogOpExecutor.java       | 51 +++++++++++++-------
 .../org/apache/impala/service/Frontend.java     | 14 ++++--
 .../org/apache/impala/service/JniFrontend.java  |  6 ++-
 .../impala/service/KuduCatalogOpExecutor.java   | 18 ++++---
 .../org/apache/impala/service/MetadataOp.java   | 10 ++--
 .../apache/impala/util/FsPermissionChecker.java |  7 ++-
 .../org/apache/impala/util/HdfsCachingUtil.java | 32 ++++++++----
 .../org/apache/impala/util/MetaStoreUtil.java   |  6 ++-
 .../apache/impala/util/RequestPoolService.java  | 19 +++++---
 .../apache/impala/util/SentryPolicyService.java | 41 ++++++++++------
 39 files changed, 388 insertions(+), 193 deletions(-)
----------------------------------------------------------------------
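
One related design note: SLF4J's parameterized form (used in
ExternalDataSourceExecutor.java below) already defers message formatting,
so a guard buys little when the arguments are cheap; it still matters when
computing an argument is itself expensive. A short sketch under the same
SLF4J assumption, with illustrative names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      // Stand-in for an expensive dump such as a debugString() call.
      private String expensiveDebugString() {
        return String.join(",", "a", "long", "plan", "dump");
      }

      void examples(String tableName) {
        // Parameterized form: the message is formatted only if DEBUG is
        // enabled, so a cheap, already-computed argument needs no guard.
        LOG.debug("load table: {}", tableName);

        // But argument expressions are evaluated eagerly at the call
        // site, so an expensive argument still wants the guard.
        if (LOG.isDebugEnabled()) {
          LOG.debug("desctbl: {}", expensiveDebugString());
        }
      }
    }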


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
index 946ca30..56fc2f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
@@ -116,15 +116,19 @@ public class AuthorizationPolicy implements PrivilegeCache {
    */
   public synchronized void addPrivilege(RolePrivilege privilege)
       throws CatalogException {
-    LOG.trace("Adding privilege: " + privilege.getName() +
-        " role ID: " + privilege.getRoleId());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Adding privilege: " + privilege.getName() +
+          " role ID: " + privilege.getRoleId());
+    }
     Role role = getRole(privilege.getRoleId());
     if (role == null) {
       throw new CatalogException(String.format("Error adding privilege: %s. Role ID " +
           "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
     }
-    LOG.trace("Adding privilege: " + privilege.getName() + " to role: " +
-        role.getName() + "ID: " + role.getId());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Adding privilege: " + privilege.getName() + " to role: " +
+          role.getName() + "ID: " + role.getId());
+    }
     role.addPrivilege(privilege);
   }
 
@@ -141,8 +145,10 @@ public class AuthorizationPolicy implements PrivilegeCache {
       throw new CatalogException(String.format("Error removing privilege: %s. Role ID " +
           "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
     }
-    LOG.trace("Removing privilege: '" + privilege.getName() + "' from Role ID: " +
-        privilege.getRoleId() + " Role Name: " + role.getName());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Removing privilege: '" + privilege.getName() + "' from Role ID: " +
+          privilege.getRoleId() + " Role Name: " + role.getName());
+    }
     return role.removePrivilege(privilege.getName());
   }
 
@@ -275,7 +281,9 @@ public class AuthorizationPolicy implements PrivilegeCache {
         for (RolePrivilege privilege: role.getPrivileges()) {
           String authorizeable = privilege.getName();
           if (authorizeable == null) {
-            LOG.trace("Ignoring invalid privilege: " + privilege.getName());
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Ignoring invalid privilege: " + privilege.getName());
+            }
             continue;
           }
           privileges.add(authorizeable);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index 7997412..e7c84da 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -194,7 +194,9 @@ public class CatalogServiceCatalog extends Catalog {
     }
 
     public void run() {
-      LOG.trace("Reloading cache pool names from HDFS");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Reloading cache pool names from HDFS");
+      }
       // Map of cache pool name to CachePoolInfo. Stored in a map to allow Set operations
       // to be performed on the keys.
       Map<String, CachePoolInfo> currentCachePools = Maps.newHashMap();
@@ -296,8 +298,10 @@ public class CatalogServiceCatalog extends Catalog {
               try {
                 catalogTbl.setTable(tbl.toThrift());
               } catch (Exception e) {
-                LOG.debug(String.format("Error calling toThrift() on table %s.%s: %s",
-                    db.getName(), tblName, e.getMessage()), e);
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug(String.format("Error calling toThrift() on table %s.%s: %s",
+                      db.getName(), tblName, e.getMessage()), e);
+                }
                 continue;
               }
               catalogTbl.setCatalog_version(tbl.getCatalogVersion());
@@ -519,7 +523,9 @@ public class CatalogServiceCatalog extends Catalog {
   private void loadFunctionsFromDbParams(Db db,
       org.apache.hadoop.hive.metastore.api.Database msDb) {
     if (msDb == null || msDb.getParameters() == null) return;
-    LOG.info("Loading native functions for database: " + db.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading native functions for database: " + db.getName());
+    }
     TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
     for (String key: msDb.getParameters().keySet()) {
       if (!key.startsWith(Db.FUNCTION_INDEX_PREFIX)) continue;
@@ -545,7 +551,9 @@ public class CatalogServiceCatalog extends Catalog {
   private void loadJavaFunctions(Db db,
       List<org.apache.hadoop.hive.metastore.api.Function> functions) {
     Preconditions.checkNotNull(functions);
-    LOG.info("Loading Java functions for database: " + db.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading Java functions for database: " + db.getName());
+    }
     for (org.apache.hadoop.hive.metastore.api.Function function: functions) {
       try {
         for (Function fn: extractFunctions(db.getName(), function)) {
@@ -880,7 +888,9 @@ public class CatalogServiceCatalog extends Catalog {
    * Throws a CatalogException if there is an error loading table metadata.
    */
   public Table reloadTable(Table tbl) throws CatalogException {
-    LOG.debug(String.format("Refreshing table metadata: %s", tbl.getFullName()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Refreshing table metadata: %s", tbl.getFullName()));
+    }
     TTableName tblName = new TTableName(tbl.getDb().getName().toLowerCase(),
         tbl.getName().toLowerCase());
     Db db = tbl.getDb();
@@ -1009,8 +1019,10 @@ public class CatalogServiceCatalog extends Catalog {
     Preconditions.checkNotNull(updatedObjects);
     updatedObjects.first = null;
     updatedObjects.second = null;
-    LOG.debug(String.format("Invalidating table metadata: %s.%s",
-        tableName.getDb_name(), tableName.getTable_name()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Invalidating table metadata: %s.%s",
+          tableName.getDb_name(), tableName.getTable_name()));
+    }
     String dbName = tableName.getDb_name();
     String tblName = tableName.getTable_name();
 
@@ -1249,8 +1261,10 @@ public class CatalogServiceCatalog extends Catalog {
       String partitionName = hdfsPartition == null
           ? HdfsTable.constructPartitionName(partitionSpec)
           : hdfsPartition.getPartitionName();
-      LOG.debug(String.format("Refreshing Partition metadata: %s %s",
-          hdfsTable.getFullName(), partitionName));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Refreshing Partition metadata: %s %s",
+            hdfsTable.getFullName(), partitionName));
+      }
       try (MetaStoreClient msClient = getMetaStoreClient()) {
         org.apache.hadoop.hive.metastore.api.Partition hmsPartition = null;
         try {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/Column.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java b/fe/src/main/java/org/apache/impala/catalog/Column.java
index 0830f61..e01fa0a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Column.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Column.java
@@ -68,7 +68,9 @@ public class Column {
 
   public boolean updateStats(ColumnStatisticsData statsData) {
     boolean statsDataCompatibleWithColType = stats_.update(type_, statsData);
-    LOG.debug("col stats: " + name_ + " #distinct=" + stats_.getNumDistinctValues());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("col stats: " + name_ + " #distinct=" + stats_.getNumDistinctValues());
+    }
     return statsDataCompatibleWithColType;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
index dab0c05..7370806 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
@@ -163,7 +163,9 @@ public class DataSourceTable extends Table {
     Preconditions.checkNotNull(msTbl);
     msTable_ = msTbl;
     clearColumns();
-    LOG.debug("load table: " + db_.getName() + "." + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("load table: " + db_.getName() + "." + name_);
+    }
     String dataSourceName = getRequiredTableProperty(msTbl, TBL_PROP_DATA_SRC_NAME, null);
     String location = getRequiredTableProperty(msTbl, TBL_PROP_LOCATION, dataSourceName);
     String className = getRequiredTableProperty(msTbl, TBL_PROP_CLASS, dataSourceName);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 21b5359..66e28f8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -370,7 +370,9 @@ public class HdfsTable extends Table {
     Preconditions.checkNotNull(fd);
     Preconditions.checkNotNull(perFsFileBlocks);
     Preconditions.checkArgument(!file.isDirectory());
-    LOG.debug("load block md for " + name_ + " file " + fd.getFileName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("load block md for " + name_ + " file " + fd.getFileName());
+    }
 
     if (!FileSystemUtil.hasGetFileBlockLocations(fs)) {
       synthesizeBlockMetadata(fs, fd, fileFormat);
@@ -463,8 +465,10 @@ public class HdfsTable extends Table {
       // part of the FileSystem interface, so we'll need to downcast.
       if (!(fs instanceof DistributedFileSystem)) continue;
 
-      LOG.trace("Loading disk ids for: " + getFullName() + ". nodes: " +
-          hostIndex_.size() + ". filesystem: " + fsKey);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loading disk ids for: " + getFullName() + ". nodes: " +
+            hostIndex_.size() + ". filesystem: " + fsKey);
+      }
       DistributedFileSystem dfs = (DistributedFileSystem)fs;
       FileBlocksInfo blockLists = perFsFileBlocks.get(fsKey);
       Preconditions.checkNotNull(blockLists);
@@ -1083,7 +1087,9 @@ public class HdfsTable extends Table {
       // Load partition and file metadata
       if (reuseMetadata) {
         // Incrementally update this table's partitions and file metadata
-        LOG.debug("incremental update for table: " + db_.getName() + "." + name_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("incremental update for table: " + db_.getName() + "." + name_);
+        }
         Preconditions.checkState(partitionsToUpdate == null || loadFileMetadata);
         updateMdFromHmsTable(msTbl);
         if (msTbl.getPartitionKeysSize() == 0) {
@@ -1093,7 +1099,9 @@ public class HdfsTable extends Table {
         }
       } else {
         // Load all partitions from Hive Metastore, including file metadata.
-        LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + name_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + name_);
+        }
         List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions =
             Lists.newArrayList();
         msPartitions.addAll(MetaStoreUtil.fetchAllPartitions(
@@ -1133,7 +1141,9 @@ public class HdfsTable extends Table {
    * Updates the file metadata of an unpartitioned HdfsTable.
    */
   private void updateUnpartitionedTableFileMd() throws CatalogException {
-    LOG.debug("update unpartitioned table: " + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("update unpartitioned table: " + name_);
+    }
     resetPartitions();
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
@@ -1156,7 +1166,9 @@ public class HdfsTable extends Table {
    */
   private void updatePartitionsFromHms(IMetaStoreClient client,
       Set<String> partitionsToUpdate, boolean loadFileMetadata) throws Exception {
-    LOG.debug("sync table partitions: " + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("sync table partitions: " + name_);
+    }
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
     Preconditions.checkState(msTbl.getPartitionKeysSize() != 0);
@@ -1415,8 +1427,10 @@ public class HdfsTable extends Table {
       IMetaStoreClient client) throws Exception {
     Preconditions.checkNotNull(partitions);
     if (partitions.isEmpty()) return;
-    LOG.info(String.format("Incrementally updating %d/%d partitions.",
-        partitions.size(), partitionMap_.size()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Incrementally updating %d/%d partitions.",
+          partitions.size(), partitionMap_.size()));
+    }
     Set<String> partitionNames = Sets.newHashSet();
     for (HdfsPartition part: partitions) {
       partitionNames.add(part.getPartitionName());
@@ -1469,8 +1483,10 @@ public class HdfsTable extends Table {
   private void loadPartitionFileMetadata(List<HdfsPartition> partitions)
       throws Exception {
     Preconditions.checkNotNull(partitions);
-    LOG.info(String.format("loading file metadata for %d partitions",
-        partitions.size()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("loading file metadata for %d partitions",
+          partitions.size()));
+    }
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
     HdfsStorageDescriptor fileFormatDescriptor =
@@ -1782,8 +1798,10 @@ public class HdfsTable extends Table {
       List<List<String>> partitionsNotInHms) throws IOException {
     if (depth == partitionKeys.size()) {
       if (existingPartitions.contains(partitionExprs)) {
-        LOG.trace(String.format("Skip recovery of path '%s' because it already exists " +
-            "in metastore", path.toString()));
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(String.format("Skip recovery of path '%s' because it already "
+              + "exists in metastore", path.toString()));
+        }
       } else {
         partitionsNotInHms.add(partitionValues);
         existingPartitions.add(partitionExprs);
@@ -1837,8 +1855,10 @@ public class HdfsTable extends Table {
           }
         }
       } catch (Exception ex) {
-        LOG.debug(String.format("Invalid partition value (%s) for Type (%s).",
-            partName[1], type.toSql()));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("Invalid partition value (%s) for Type (%s).",
+              partName[1], type.toSql()));
+        }
         return null;
       }
     } else {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
index a59f997..6a20fcc 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
@@ -237,8 +237,10 @@ public class ImpaladCatalog extends Catalog {
       throws TableLoadingException, DatabaseNotFoundException {
     // This item is out of date and should not be applied to the catalog.
     if (catalogDeltaLog_.wasObjectRemovedAfter(catalogObject)) {
-      LOG.debug(String.format("Skipping update because a matching object was removed " +
-          "in a later catalog version: %s", catalogObject));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Skipping update because a matching object was removed " +
+            "in a later catalog version: %s", catalogObject));
+      }
       return;
     }
 
@@ -354,8 +356,10 @@ public class ImpaladCatalog extends Catalog {
       throws TableLoadingException {
     Db db = getDb(thriftTable.db_name);
     if (db == null) {
-      LOG.debug("Parent database of table does not exist: " +
-          thriftTable.db_name + "." + thriftTable.tbl_name);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Parent database of table does not exist: " +
+            thriftTable.db_name + "." + thriftTable.tbl_name);
+      }
       return;
     }
 
@@ -369,7 +373,9 @@ public class ImpaladCatalog extends Catalog {
     function.setCatalogVersion(catalogVersion);
     Db db = getDb(function.getFunctionName().getDb());
     if (db == null) {
-      LOG.debug("Parent database of function does not exist: " + function.getName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Parent database of function does not exist: " + function.getName());
+      }
       return;
     }
     Function existingFn = db.getFunction(fn.getSignature());

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
index 29e5df9..76db6f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
+++ b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
@@ -79,7 +79,9 @@ public class MetaStoreClientPool {
      * connection to the HMS before giving up and failing out with an exception.
      */
     private MetaStoreClient(HiveConf hiveConf, int cnxnTimeoutSec) {
-      LOG.debug("Creating MetaStoreClient. Pool Size = " + clientPool_.size());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Creating MetaStoreClient. Pool Size = " + clientPool_.size());
+      }
 
       long retryDelaySeconds = hiveConf.getTimeVar(
           HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
index 8285e9b..4444de6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
@@ -112,7 +112,7 @@ public class PartitionStatsUtil {
         partition.putToParameters(INCREMENTAL_STATS_CHUNK_PREFIX + i, chunks.get(i));
       }
     } catch (TException e) {
-      LOG.info("Error saving partition stats: ", e);
+      LOG.error("Error saving partition stats: ", e);
       // TODO: What to throw here?
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index 7bde786..214413b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -154,7 +154,7 @@ public abstract class Table implements CatalogObject {
    * the correctness of the system.
    */
   protected void loadAllColumnStats(IMetaStoreClient client) {
-    LOG.debug("Loading column stats for table: " + name_);
+    if (LOG.isDebugEnabled()) LOG.debug("Loading column stats for table: " + name_);
     List<ColumnStatisticsObj> colStats;
 
     // We need to only query those columns which may have stats; asking HMS for other

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
index c3ae2ba..1d0d54e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
@@ -55,7 +55,7 @@ public class TableLoader {
    */
   public Table load(Db db, String tblName) {
     String fullTblName = db.getName() + "." + tblName;
-    LOG.info("Loading metadata for: " + fullTblName);
+    if (LOG.isDebugEnabled()) LOG.debug("Loading metadata for: " + fullTblName);
     Table table;
     // turn all exceptions into TableLoadingException
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index c81e27f..f167771 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -57,7 +57,7 @@ public class FileSystemUtil {
     for (FileStatus fStatus: fs.listStatus(directory)) {
       // Only delete files that are not hidden.
       if (fStatus.isFile() && !isHiddenFile(fStatus.getPath().getName())) {
-        LOG.debug("Removing: " + fStatus.getPath());
+        if (LOG.isDebugEnabled()) LOG.debug("Removing: " + fStatus.getPath());
         fs.delete(fStatus.getPath(), false);
         ++numFilesDeleted;
       }
@@ -123,7 +123,9 @@ public class FileSystemUtil {
     int numFilesMoved = 0;
     for (FileStatus fStatus: sourceFs.listStatus(sourceDir)) {
       if (fStatus.isDirectory()) {
-        LOG.debug("Skipping copy of directory: " + fStatus.getPath());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Skipping copy of directory: " + fStatus.getPath());
+        }
         continue;
       } else if (isHiddenFile(fStatus.getPath().getName())) {
         continue;
@@ -180,8 +182,10 @@ public class FileSystemUtil {
     // non-distributed filesystem.
     if (!doRename) doRename = !destIsDfs && sameFileSystem;
     if (doRename) {
-      LOG.debug(String.format(
-          "Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format(
+            "Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
+      }
       // Move (rename) the file.
       destFs.rename(sourceFile, destFile);
       return;
@@ -192,13 +196,17 @@ public class FileSystemUtil {
       // encryption zones. A move would return an error from the NN because a move is a
       // metadata-only operation and the files would not be encrypted/decrypted properly
       // on the DNs.
-      LOG.info(String.format(
-          "Copying source '%s' to '%s' because HDFS encryption zones are different.",
-          sourceFile, destFile));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format(
+            "Copying source '%s' to '%s' because HDFS encryption zones are different.",
+            sourceFile, destFile));
+      }
     } else {
       Preconditions.checkState(!sameFileSystem);
-      LOG.info(String.format("Copying '%s' to '%s' between filesystems.",
-          sourceFile, destFile));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Copying '%s' to '%s' between filesystems.",
+            sourceFile, destFile));
+      }
     }
     FileUtil.copy(sourceFs, sourceFile, destFs, destFile, true, true, CONF);
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
index 49d9426..7e8859a 100644
--- a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
+++ b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
@@ -157,7 +157,9 @@ public class ExternalDataSourceExecutor {
         if (initString_ != null && initString_.startsWith(CACHE_CLASS_PREFIX)) {
           cachedClasses_.put(cacheMapKey, c);
         }
-        LOG.info("Loaded jar for class {} at path {}", className_, jarPath_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Loaded jar for class {} at path {}", className_, jarPath_);
+        }
         numClassCacheMisses_++;
       } else {
         numClassCacheHits_++;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
index 57dbd8f..07c51f1 100644
--- a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
@@ -181,10 +181,14 @@ public class AggregationNode extends PlanNode {
     cardinality_ = aggInfo_.getGroupingExprs().isEmpty() ? 1 :
       Expr.getNumDistinctValues(aggInfo_.getGroupingExprs());
     // take HAVING predicate into account
-    LOG.trace("Agg: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Agg: cardinality=" + Long.toString(cardinality_));
+    }
     if (cardinality_ > 0) {
       cardinality_ = Math.round((double) cardinality_ * computeSelectivity());
-      LOG.trace("sel=" + Double.toString(computeSelectivity()));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("sel=" + Double.toString(computeSelectivity()));
+      }
     }
     // if we ended up with an overflow, the estimate is certain to be wrong
     if (cardinality_ < 0) cardinality_ = -1;
@@ -199,7 +203,9 @@ public class AggregationNode extends PlanNode {
       }
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.trace("stats Agg: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("stats Agg: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
index 6e07e79..408680b 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
@@ -121,7 +121,9 @@ public class AnalyticEvalNode extends PlanNode {
     // do this at the end so it can take all conjuncts into account
     computeStats(analyzer);
 
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    }
 
     // point fn calls, partition and ordering exprs at our input
     ExprSubstitutionMap childSmap = getCombinedChildSmap();
@@ -129,7 +131,7 @@ public class AnalyticEvalNode extends PlanNode {
     substitutedPartitionExprs_ = Expr.substituteList(partitionExprs_, childSmap,
         analyzer, false);
     orderByElements_ = OrderByElement.substitute(orderByElements_, childSmap, analyzer);
-    LOG.trace("evalnode: " + debugString());
+    if (LOG.isTraceEnabled()) LOG.trace("evalnode: " + debugString());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
index e44fc0b..6d726ec 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
@@ -310,7 +310,9 @@ public class AnalyticPlanner {
 
     SortInfo sortInfo = new SortInfo(
         Expr.substituteList(sortExprs, sortSmap, analyzer_, false), isAsc, nullsFirst);
-    LOG.trace("sortinfo exprs: " + Expr.debugString(sortInfo.getOrderingExprs()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("sortinfo exprs: " + Expr.debugString(sortInfo.getOrderingExprs()));
+    }
     sortInfo.setMaterializedTupleInfo(sortTupleDesc, sortSlotExprs);
     return sortInfo;
   }
@@ -373,7 +375,9 @@ public class AnalyticPlanner {
       sortTupleId = sortNode.tupleIds_.get(0);
       bufferedTupleDesc =
           analyzer_.getDescTbl().copyTupleDescriptor(sortTupleId, "buffered-tuple");
-      LOG.trace("desctbl: " + analyzer_.getDescTbl().debugString());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("desctbl: " + analyzer_.getDescTbl().debugString());
+      }
 
       List<SlotDescriptor> inputSlots = analyzer_.getTupleDesc(sortTupleId).getSlots();
       List<SlotDescriptor> bufferedSlots = bufferedTupleDesc.getSlots();
@@ -399,7 +403,9 @@ public class AnalyticPlanner {
         partitionByEq = createNullMatchingEquals(
             Expr.substituteList(windowGroup.partitionByExprs, sortSmap, analyzer_, false),
             sortTupleId, bufferedSmap);
-        LOG.trace("partitionByEq: " + partitionByEq.debugString());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("partitionByEq: " + partitionByEq.debugString());
+        }
       }
       Expr orderByEq = null;
       if (!windowGroup.orderByElements.isEmpty()) {
@@ -407,7 +413,9 @@ public class AnalyticPlanner {
             OrderByElement.getOrderByExprs(OrderByElement.substitute(
                 windowGroup.orderByElements, sortSmap, analyzer_)),
             sortTupleId, bufferedSmap);
-        LOG.trace("orderByEq: " + orderByEq.debugString());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("orderByEq: " + orderByEq.debugString());
+        }
       }
 
       root = new AnalyticEvalNode(ctx_.getNextNodeId(), root,

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
index 307e67e..22bdb49 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
@@ -272,7 +272,9 @@ public class DataSourceScanNode extends ScanNode {
     LOG.debug("computeStats DataSourceScan: cardinality=" + Long.toString(cardinality_));
 
     numNodes_ = table_.getNumNodes();
-    LOG.debug("computeStats DataSourceScan: #nodes=" + Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats DataSourceScan: #nodes=" + Integer.toString(numNodes_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
index 24d7caa..3cfc787 100644
--- a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
@@ -78,9 +78,11 @@ public class DistributedPlanner {
       Preconditions.checkState(!queryStmt.hasOffset());
       isPartitioned = true;
     }
-    LOG.debug("create plan fragments");
     long perNodeMemLimit = ctx_.getQueryOptions().mem_limit;
-    LOG.debug("memlimit=" + Long.toString(perNodeMemLimit));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("create plan fragments");
+      LOG.debug("memlimit=" + Long.toString(perNodeMemLimit));
+    }
     createPlanFragments(singleNodePlan, isPartitioned, perNodeMemLimit, fragments);
     return fragments;
   }
@@ -435,10 +437,12 @@ public class DistributedPlanner {
         broadcastCost = 2 * rhsDataSize * leftChildFragment.getNumNodes();
       }
     }
-    LOG.debug("broadcast: cost=" + Long.toString(broadcastCost));
-    LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
-        + Float.toString(rhsTree.getAvgRowSize()) + " #nodes="
-        + Integer.toString(leftChildFragment.getNumNodes()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("broadcast: cost=" + Long.toString(broadcastCost));
+      LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
+          + Float.toString(rhsTree.getAvgRowSize()) + " #nodes="
+          + Integer.toString(leftChildFragment.getNumNodes()));
+    }
 
     // repartition: both left- and rightChildFragment are partitioned on the
     // join exprs, and a hash table is built with the rightChildFragment's output.
@@ -465,12 +469,14 @@ public class DistributedPlanner {
       double rhsNetworkCost = (rhsHasCompatPartition) ? 0.0 : rhsDataSize;
       partitionCost = Math.round(lhsNetworkCost + rhsNetworkCost + rhsDataSize);
     }
-    LOG.debug("partition: cost=" + Long.toString(partitionCost));
-    LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " row_size="
-        + Float.toString(lhsTree.getAvgRowSize()));
-    LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
-        + Float.toString(rhsTree.getAvgRowSize()));
-    LOG.debug(rhsTree.getExplainString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("partition: cost=" + Long.toString(partitionCost));
+      LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " row_size="
+          + Float.toString(lhsTree.getAvgRowSize()));
+      LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
+          + Float.toString(rhsTree.getAvgRowSize()));
+      LOG.debug(rhsTree.getExplainString());
+    }
 
     boolean doBroadcast = false;
     // we do a broadcast join if

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
index e8b26bc..40b44d3 100644
--- a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
@@ -216,11 +216,15 @@ public class HBaseScanNode extends ScanNode {
     cardinality_ *= computeSelectivity();
     cardinality_ = Math.max(1, cardinality_);
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HbaseScan: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HbaseScan: cardinality=" + Long.toString(cardinality_));
+    }
 
     // TODO: take actual regions into account
     numNodes_ = tbl.getNumNodes();
-    LOG.debug("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
index 8d15425..7368358 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
@@ -119,8 +119,10 @@ public class HdfsPartitionFilter {
     }
 
     Expr literalPredicate = predicate_.substitute(sMap, analyzer, false);
-    LOG.trace("buildPartitionPredicate: " + literalPredicate.toSql() + " " +
-        literalPredicate.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("buildPartitionPredicate: " + literalPredicate.toSql() + " " +
+          literalPredicate.debugString());
+    }
     Preconditions.checkState(literalPredicate.isConstant());
     return literalPredicate;
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 66ed792..a2ad76c 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -373,7 +373,9 @@ public class HdfsScanNode extends ScanNode {
   @Override
   public void computeStats(Analyzer analyzer) {
     super.computeStats(analyzer);
-    LOG.debug("collecting partitions for table " + tbl_.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("collecting partitions for table " + tbl_.getName());
+    }
     numPartitionsMissingStats_ = 0;
     totalFiles_ = 0;
     totalBytes_ = 0;
@@ -430,17 +432,23 @@ public class HdfsScanNode extends ScanNode {
     }
 
     if (cardinality_ > 0) {
-      LOG.debug("cardinality_=" + Long.toString(cardinality_) +
-                " sel=" + Double.toString(computeSelectivity()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("cardinality_=" + Long.toString(cardinality_) +
+                  " sel=" + Double.toString(computeSelectivity()));
+      }
       cardinality_ = Math.round(cardinality_ * computeSelectivity());
       // IMPALA-2165: Avoid setting the cardinality to 0 after rounding.
       cardinality_ = Math.max(cardinality_, 1);
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HdfsScan: cardinality_=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HdfsScan: cardinality_=" + Long.toString(cardinality_));
+    }
 
     computeNumNodes(analyzer, cardinality_);
-    LOG.debug("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_));
+    }
   }
 
   /**
@@ -493,10 +501,12 @@ public class HdfsScanNode extends ScanNode {
     // Tables can reside on 0 nodes (empty table), but a plan node must always be
     // executed on at least one node.
     numNodes_ = (cardinality == 0 || totalNodes == 0) ? 1 : totalNodes;
-    LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() +
-        " localRanges=" + numLocalRanges + " remoteRanges=" + numRemoteRanges +
-        " localHostSet.size=" + localHostSet.size() +
-        " clusterNodes=" + cluster.numNodes());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() +
+          " localRanges=" + numLocalRanges + " remoteRanges=" + numRemoteRanges +
+          " localHostSet.size=" + localHostSet.size() +
+          " clusterNodes=" + cluster.numNodes());
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/JoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinNode.java b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
index 3362047..13cc854 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
@@ -475,7 +475,9 @@ public abstract class JoinNode extends PlanNode {
     }
     cardinality_ = capAtLimit(cardinality_);
     Preconditions.checkState(hasValidStats());
-    LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
index 61f6b28..cca7a6f 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
@@ -223,7 +223,9 @@ public class KuduScanNode extends ScanNode {
     cardinality_ *= computeSelectivity();
     cardinality_ = Math.min(Math.max(1, cardinality_), kuduTable_.getNumRows());
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats KuduScan: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats KuduScan: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
index 6db4ae4..8f2a1a4 100644
--- a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
@@ -83,13 +83,11 @@ public class ParallelPlanner {
    * Assign fragment's plan id and cohort id to children.
    */
   private void createBuildPlans(PlanFragment fragment, CohortId buildCohortId) {
-    LOG.info("createbuildplans fragment " + fragment.getId().toString());
     List<JoinNode> joins = Lists.newArrayList();
     collectJoins(fragment.getPlanRoot(), joins);
     if (!joins.isEmpty()) {
       List<String> joinIds = Lists.newArrayList();
       for (JoinNode join: joins) joinIds.add(join.getId().toString());
-      LOG.info("collected joins " + Joiner.on(" ").join(joinIds));
 
       if (buildCohortId == null) buildCohortId = cohortIdGenerator_.getNextId();
       for (JoinNode join: joins) createBuildPlan(join, buildCohortId);
@@ -98,8 +96,6 @@ public class ParallelPlanner {
     if (!fragment.getChildren().isEmpty()) {
       List<String> ids = Lists.newArrayList();
       for (PlanFragment c: fragment.getChildren()) ids.add(c.getId().toString());
-      LOG.info("collected children " + Joiner.on(" ").join(ids) + " parent "
-          + fragment.getId().toString());
     }
     for (PlanFragment child: fragment.getChildren()) {
       child.setPlanId(fragment.getPlanId());
@@ -147,7 +143,6 @@ public class ParallelPlanner {
    * Also assigns the new plan a plan id.
    */
   private void createBuildPlan(JoinNode join, CohortId cohortId) {
-    LOG.info("createbuildplan " + join.getId().toString());
     Preconditions.checkNotNull(cohortId);
     // collect all ExchangeNodes on the build side and their corresponding input
     // fragments
@@ -183,8 +178,6 @@ public class ParallelPlanner {
 
     // move input fragments
     for (int i = 0; i < exchNodes.size(); ++i) {
-      LOG.info("re-link fragment " + inputFragments.get(i).getId().toString() + " to "
-          + exchNodes.get(i).getFragment().getId().toString());
       Preconditions.checkState(exchNodes.get(i).getFragment() == buildFragment);
       join.getFragment().removeChild(inputFragments.get(i));
       buildFragment.getChildren().add(inputFragments.get(i));
@@ -196,9 +189,11 @@ public class ParallelPlanner {
     buildFragment.setCohortId(cohortId);
 
     planRoots_.add(buildFragment);
-    LOG.info("new build fragment " + buildFragment.getId().toString());
-    LOG.info("in cohort " + buildFragment.getCohortId().toString());
-    LOG.info("for join node " + join.getId().toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("new build fragment " + buildFragment.getId().toString());
+      LOG.trace("in cohort " + buildFragment.getCohortId().toString());
+      LOG.trace("for join node " + join.getId().toString());
+    }
     createBuildPlans(buildFragment, null);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/Planner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index 1762144..40ca682 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -164,9 +164,11 @@ public class Planner {
     }
     rootFragment.setOutputExprs(resultExprs);
 
-    LOG.debug("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString());
-    LOG.debug("resultexprs: " + Expr.debugString(rootFragment.getOutputExprs()));
-    LOG.debug("finalize plan fragments");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString());
+      LOG.debug("resultexprs: " + Expr.debugString(rootFragment.getOutputExprs()));
+      LOG.debug("finalize plan fragments");
+    }
     for (PlanFragment fragment: fragments) {
       fragment.finalize(ctx_.getRootAnalyzer());
     }
@@ -214,7 +216,7 @@ public class Planner {
         graph.addTargetColumnLabels(ctx_.getQueryStmt().getColLabels());
         graph.computeLineageGraph(resultExprs, ctx_.getRootAnalyzer());
       }
-      LOG.trace("lineage: " + graph.debugString());
+      if (LOG.isTraceEnabled()) LOG.trace("lineage: " + graph.debugString());
       ctx_.getRootAnalyzer().getTimeline().markEvent("Lineage info computed");
     }
 
@@ -407,8 +409,10 @@ public class Planner {
     request.setPer_host_mem_req(maxPerHostMem);
     request.setPer_host_vcores((short) maxPerHostVcores);
 
-    LOG.debug("Estimated per-host peak memory requirement: " + maxPerHostMem);
-    LOG.debug("Estimated per-host virtual cores requirement: " + maxPerHostVcores);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Estimated per-host peak memory requirement: " + maxPerHostMem);
+      LOG.debug("Estimated per-host virtual cores requirement: " + maxPerHostVcores);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
index 79cd8b8..133a14e 100644
--- a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
+++ b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
@@ -245,7 +245,9 @@ public final class RuntimeFilterGenerator {
       // Ensure that the targer expr does not contain TupleIsNull predicates as these
       // can't be evaluated at a scan node.
       targetExpr = TupleIsNullPredicate.unwrapExpr(targetExpr.clone());
-      LOG.trace("Generating runtime filter from predicate " + joinPredicate);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Generating runtime filter from predicate " + joinPredicate);
+      }
       return new RuntimeFilter(idGen.getNextId(), filterSrcNode,
           srcExpr, targetExpr, targetSlots);
     }
@@ -424,7 +426,7 @@ public final class RuntimeFilterGenerator {
     }
     for (RuntimeFilter filter:
          filters.subList(0, Math.min(filters.size(), maxNumFilters))) {
-      LOG.trace("Runtime filter: " + filter.debugString());
+      if (LOG.isTraceEnabled()) LOG.trace("Runtime filter: " + filter.debugString());
       filter.assignToPlanNodes();
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SelectNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SelectNode.java b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
index 7713520..c571c1c 100644
--- a/fe/src/main/java/org/apache/impala/planner/SelectNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
@@ -74,7 +74,9 @@ public class SelectNode extends PlanNode {
       Preconditions.checkState(cardinality_ >= 0);
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index f62c236..403a432 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -141,7 +141,9 @@ public class SingleNodePlanner {
       analyzer.materializeSlots(queryStmt.getBaseTblResultExprs());
     }
 
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    }
     PlanNode singleNodePlan = createQueryPlan(queryStmt, analyzer,
         ctx_.getQueryOptions().isDisable_outermost_topn());
     Preconditions.checkNotNull(singleNodePlan);
@@ -363,15 +365,19 @@ public class SingleNodePlanner {
         // use 0 for the size to avoid it becoming the leftmost input
         // TODO: Consider raw size of scanned partitions in the absence of stats.
         candidates.add(new Pair(ref, new Long(0)));
-        LOG.trace("candidate " + ref.getUniqueAlias() + ": 0");
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("candidate " + ref.getUniqueAlias() + ": 0");
+        }
         continue;
       }
       Preconditions.checkState(ref.isAnalyzed());
       long materializedSize =
           (long) Math.ceil(plan.getAvgRowSize() * (double) plan.getCardinality());
       candidates.add(new Pair(ref, new Long(materializedSize)));
-      LOG.trace(
-          "candidate " + ref.getUniqueAlias() + ": " + Long.toString(materializedSize));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+            "candidate " + ref.getUniqueAlias() + ": " + Long.toString(materializedSize));
+      }
     }
     if (candidates.isEmpty()) return null;
 
@@ -402,7 +408,9 @@ public class SingleNodePlanner {
       List<Pair<TableRef, PlanNode>> refPlans, List<SubplanRef> subplanRefs)
       throws ImpalaException {
 
-    LOG.trace("createJoinPlan: " + leftmostRef.getUniqueAlias());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("createJoinPlan: " + leftmostRef.getUniqueAlias());
+    }
     // the refs that have yet to be joined
     List<Pair<TableRef, PlanNode>> remainingRefs = Lists.newArrayList();
     PlanNode root = null;  // root of accumulated join plan
@@ -458,7 +466,9 @@ public class SingleNodePlanner {
         analyzer.setAssignedConjuncts(root.getAssignedConjuncts());
         PlanNode candidate = createJoinNode(root, entry.second, ref, analyzer);
         if (candidate == null) continue;
-        LOG.trace("cardinality=" + Long.toString(candidate.getCardinality()));
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("cardinality=" + Long.toString(candidate.getCardinality()));
+        }
 
         // Use 'candidate' as the new root; don't consider any other table refs at this
         // position in the plan.
@@ -489,10 +499,12 @@ public class SingleNodePlanner {
       long lhsCardinality = root.getCardinality();
       long rhsCardinality = minEntry.second.getCardinality();
       numOps += lhsCardinality + rhsCardinality;
-      LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias()
-          + " #lhs=" + Long.toString(lhsCardinality)
-          + " #rhs=" + Long.toString(rhsCardinality)
-          + " #ops=" + Long.toString(numOps));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias()
+            + " #lhs=" + Long.toString(lhsCardinality)
+            + " #rhs=" + Long.toString(rhsCardinality)
+            + " #ops=" + Long.toString(numOps));
+      }
       remainingRefs.remove(minEntry);
       joinedRefs.add(minEntry.first);
       root = newRoot;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SortNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SortNode.java b/fe/src/main/java/org/apache/impala/planner/SortNode.java
index 3a71f8c..58e04b4 100644
--- a/fe/src/main/java/org/apache/impala/planner/SortNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SortNode.java
@@ -123,16 +123,20 @@ public class SortNode extends PlanNode {
     info_.substituteOrderingExprs(outputSmap_, analyzer);
     info_.checkConsistency();
 
-    LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
-        + outputSmap_.debugString());
-    LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
+          + outputSmap_.debugString());
+      LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
+    }
   }
 
   @Override
   protected void computeStats(Analyzer analyzer) {
     super.computeStats(analyzer);
     cardinality_ = capAtLimit(getChild(0).cardinality_);
-    LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/UnionNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/UnionNode.java b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
index a085973..69e7a37 100644
--- a/fe/src/main/java/org/apache/impala/planner/UnionNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
@@ -97,7 +97,9 @@ public class UnionNode extends PlanNode {
     // (VALUES(1 x, 1 y)) b ON (a.x = b.y)). We need to set the correct value.
     if (numNodes_ == -1) numNodes_ = 1;
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 9748003..52000a9 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -636,7 +636,9 @@ public class CatalogOpExecutor {
 
       // Set the altered view attributes and update the metastore.
       setViewAttributes(params, msTbl);
-      LOG.debug(String.format("Altering view %s", tableName));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Altering view %s", tableName));
+      }
       applyAlterTable(msTbl);
       try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
         tbl.load(true, msClient.getHiveClient(), msTbl);
@@ -665,7 +667,9 @@ public class CatalogOpExecutor {
 
     TableName tableName = table.getTableName();
     Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    LOG.info(String.format("Updating table stats for: %s", tableName));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Updating table stats for: %s", tableName));
+    }
 
     // Deep copy the msTbl to avoid updating our cache before successfully persisting
     // the results to the metastore.
@@ -761,8 +765,10 @@ public class CatalogOpExecutor {
       // but it is predictable and easy to reason about because it does not depend on the
       // existing state of the metadata. See IMPALA-2201.
       long numRows = partitionStats.stats.num_rows;
-      LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
-          partition.getValuesAsString(), numRows));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
+            partition.getValuesAsString(), numRows));
+      }
       PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
       partition.putToParameters(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
       partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
@@ -809,10 +815,12 @@ public class CatalogOpExecutor {
       ColumnStatisticsData colStatsData =
           createHiveColStatsData(entry.getValue(), tableCol.getType());
       if (colStatsData == null) continue;
-      LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
-          "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
-          entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
-          entry.getValue().getAvg_size()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
+            "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
+            entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
+            entry.getValue().getAvg_size()));
+      }
       ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(colName,
           tableCol.getType().toString().toLowerCase(), colStatsData);
       colStats.addToStatsObj(colStatsObj);
@@ -879,8 +887,10 @@ public class CatalogOpExecutor {
     Preconditions.checkState(dbName != null && !dbName.isEmpty(),
         "Null or empty database name passed as argument to Catalog.createDatabase");
     if (params.if_not_exists && catalog_.getDb(dbName) != null) {
-      LOG.debug("Skipping database creation because " + dbName + " already exists and " +
-          "IF NOT EXISTS was specified.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Skipping database creation because " + dbName + " already exists "
+            + "and IF NOT EXISTS was specified.");
+      }
       resp.getResult().setVersion(catalog_.getCatalogVersion());
       return;
     }
@@ -893,7 +903,7 @@ public class CatalogOpExecutor {
     if (params.getLocation() != null) {
       db.setLocationUri(params.getLocation());
     }
-    LOG.debug("Creating database " + dbName);
+    if (LOG.isDebugEnabled()) LOG.debug("Creating database " + dbName);
     Db newDb = null;
     synchronized (metastoreDdlLock_) {
       try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
@@ -905,8 +915,10 @@ public class CatalogOpExecutor {
             throw new ImpalaRuntimeException(
                 String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e);
           }
-          LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
-              "IF NOT EXISTS was specified.", e, dbName));
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
+                "IF NOT EXISTS was specified.", e, dbName));
+          }
           newDb = catalog_.getDb(dbName);
           if (newDb == null) {
             try {
@@ -946,8 +958,10 @@ public class CatalogOpExecutor {
  private void createFunction(TCreateFunctionParams params, TDdlExecResponse resp)
       throws ImpalaException {
     Function fn = Function.fromThrift(params.getFn());
-    LOG.debug(String.format("Adding %s: %s",
-        fn.getClass().getSimpleName(), fn.signatureString()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Adding %s: %s",
+          fn.getClass().getSimpleName(), fn.signatureString()));
+    }
     boolean isPersistentJavaFn =
         (fn.getBinaryType() == TFunctionBinaryType.JAVA) && fn.isPersistent();
     synchronized (metastoreDdlLock_) {
@@ -982,10 +996,11 @@ public class CatalogOpExecutor {
             "No compatible function signatures found in class: " + hiveFn.getClassName());
         }
         if (addJavaFunctionToHms(fn.dbName(), hiveFn, params.if_not_exists)) {
-          LOG.info("Funcs size:" + funcs.size());
           for (Function addedFn: funcs) {
-            LOG.info(String.format("Adding function: %s.%s", addedFn.dbName(),
-                addedFn.signatureString()));
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(String.format("Adding function: %s.%s", addedFn.dbName(),
+                  addedFn.signatureString()));
+            }
             Preconditions.checkState(catalog_.addFunction(addedFn));
             addedFunctions.add(buildTCatalogFnObject(addedFn));
           }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 5e0307c..a7bec4d 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -839,8 +839,10 @@ public class Frontend {
         return false;
       }
 
-      LOG.trace(String.format("Waiting for table(s) to complete loading: %s",
-          Joiner.on(", ").join(missingTbls)));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(String.format("Waiting for table(s) to complete loading: %s",
+            Joiner.on(", ").join(missingTbls)));
+      }
       getCatalog().waitForCatalogUpdate(MAX_CATALOG_UPDATE_WAIT_TIME_MS);
       missingTbls = getMissingTbls(missingTbls);
       // TODO: Check for query cancellation here.
@@ -879,7 +881,7 @@ public class Frontend {
 
     AnalysisContext analysisCtx = new AnalysisContext(impaladCatalog_, queryCtx,
         authzConfig_);
-    LOG.debug("analyze query " + queryCtx.request.stmt);
+    if (LOG.isDebugEnabled()) LOG.debug("analyze query " + queryCtx.request.stmt);
 
     // Run analysis in a loop until any of the following events occur:
     // 1) Analysis completes successfully.
@@ -898,8 +900,10 @@ public class Frontend {
 
           // Some tables/views were missing, request and wait for them to load.
           if (!requestTblLoadAndWait(missingTbls, MISSING_TBL_LOAD_WAIT_TIMEOUT_MS)) {
-            LOG.info(String.format("Missing tables were not received in %dms. Load " +
-                "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS));
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(String.format("Missing tables were not received in %dms. Load " +
+                  "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS));
+            }
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/JniFrontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index b343369..29cbd10 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -157,7 +157,9 @@ public class JniFrontend {
 
     StringBuilder explainString = new StringBuilder();
     TExecRequest result = frontend_.createExecRequest(queryCtx, explainString);
-    if (explainString.length() > 0) LOG.debug(explainString.toString());
+    if (explainString.length() > 0 && LOG.isDebugEnabled()) {
+      LOG.debug(explainString.toString());
+    }
 
     // TODO: avoid creating serializer for each query?
     TSerializer serializer = new TSerializer(protocolFactory_);
@@ -232,7 +234,7 @@ public class JniFrontend {
     TQueryCtx queryCtx = new TQueryCtx();
     JniUtil.deserializeThrift(protocolFactory_, queryCtx, thriftQueryContext);
     String plan = frontend_.getExplainString(queryCtx);
-    LOG.debug("Explain plan: " + plan);
+    if (LOG.isDebugEnabled()) LOG.debug("Explain plan: " + plan);
     return plan;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
index 82fcab8..e3f9a7f 100644
--- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
@@ -70,8 +70,10 @@ public class KuduCatalogOpExecutor {
     Preconditions.checkState(!Table.isExternalTable(msTbl));
     String kuduTableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME);
     String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Creating table '%s' in master '%s'", kuduTableName,
-        masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Creating table '%s' in master '%s'", kuduTableName,
+          masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       // TODO: The IF NOT EXISTS case should be handled by Kudu to ensure atomicity.
       // (see KUDU-1710).
@@ -191,8 +193,10 @@ public class KuduCatalogOpExecutor {
     Preconditions.checkState(!Table.isExternalTable(msTbl));
     String tableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME);
     String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Dropping table '%s' from master '%s'", tableName,
-        masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Dropping table '%s' from master '%s'", tableName,
+          masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       Preconditions.checkState(!Strings.isNullOrEmpty(tableName));
       // TODO: The IF EXISTS case should be handled by Kudu to ensure atomicity.
@@ -220,8 +224,10 @@ public class KuduCatalogOpExecutor {
     String kuduTableName = msTblCopy.getParameters().get(KuduTable.KEY_TABLE_NAME);
     Preconditions.checkState(!Strings.isNullOrEmpty(kuduTableName));
     String masterHosts = msTblCopy.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Loading schema of table '%s' from master '%s'",
-        kuduTableName, masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Loading schema of table '%s' from master '%s'",
+          kuduTableName, masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       if (!kudu.tableExists(kuduTableName)) {
         throw new ImpalaRuntimeException(String.format("Table does not exist in Kudu: " +

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/MetadataOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/MetadataOp.java b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
index c24f153..16166ee 100644
--- a/fe/src/main/java/org/apache/impala/service/MetadataOp.java
+++ b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
@@ -375,7 +375,9 @@ public class MetadataOp {
         }
       }
     }
-    LOG.debug("Returning " + result.rows.size() + " table columns");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning " + result.rows.size() + " table columns");
+    }
     return result;
   }
 
@@ -426,7 +428,9 @@ public class MetadataOp {
       result.rows.add(row);
     }
 
-    LOG.debug("Returning " + result.rows.size() + " schemas");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning " + result.rows.size() + " schemas");
+    }
     return result;
   }
 
@@ -479,7 +483,7 @@ public class MetadataOp {
         result.rows.add(row);
       }
     }
-    LOG.debug("Returning " + result.rows.size() + " tables");
+    if (LOG.isDebugEnabled()) LOG.debug("Returning " + result.rows.size() + " tables");
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
index 8c7b1cc..579091c 100644
--- a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
+++ b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
@@ -287,9 +287,12 @@ public class FsPermissionChecker {
     try {
       aclStatus = fs.getAclStatus(path);
     } catch (AclException ex) {
-      LOG.trace("No ACLs retrieved, skipping ACLs check (HDFS will enforce ACLs)", ex);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+            "No ACLs retrieved, skipping ACLs check (HDFS will enforce ACLs)", ex);
+      }
     } catch (UnsupportedOperationException ex) {
-      LOG.trace("No ACLs retrieved, unsupported", ex);
+      if (LOG.isTraceEnabled()) LOG.trace("No ACLs retrieved, unsupported", ex);
     }
     return new Permissions(fs.getFileStatus(path), aclStatus);
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
index 0ee7d28..acbc53a 100644
--- a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
@@ -130,7 +130,9 @@ public class HdfsCachingUtil {
   public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table)
       throws ImpalaRuntimeException {
     Preconditions.checkNotNull(table);
-    LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
+    }
     Long id = getCacheDirectiveId(table.getParameters());
     if (id == null) return;
     HdfsCachingUtil.removeDirective(id);
@@ -236,8 +238,10 @@ public class HdfsCachingUtil {
 
     bytesNeeded = cacheDir.getStats().getBytesNeeded();
     currentBytesCached = cacheDir.getStats().getBytesCached();
-    LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " +
-        "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " +
+          "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded));
+    }
     // All the bytes are cached, just return.
     if (bytesNeeded == currentBytesCached) return;
 
@@ -258,9 +262,11 @@ public class HdfsCachingUtil {
       currentBytesCached = cacheDir.getStats().getBytesCached();
       bytesNeeded = cacheDir.getStats().getBytesNeeded();
       if (currentBytesCached == bytesNeeded) {
-        LOG.debug(String.format("Cache directive id: %d has completed." +
-            "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached,
-            bytesNeeded));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("Cache directive id: %d has completed." +
+              "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached,
+              bytesNeeded));
+        }
         return;
       }
 
@@ -295,7 +301,9 @@ public class HdfsCachingUtil {
         .setPool(poolName)
         .setReplication(replication)
         .setPath(path).build();
-    LOG.debug("Submitting cache directive: " + info.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Submitting cache directive: " + info.toString());
+    }
     try {
       return getDfs().addCacheDirective(info);
     } catch (IOException e) {
@@ -347,7 +355,9 @@ public class HdfsCachingUtil {
         .setPool(poolName)
         .setReplication(replication)
         .setPath(path).build();
-    LOG.debug("Modifying cache directive: " + info.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Modifying cache directive: " + info.toString());
+    }
     try {
       getDfs().modifyCacheDirective(info);
     } catch (IOException e) {
@@ -362,7 +372,7 @@ public class HdfsCachingUtil {
    * directive.
    */
   private static void removeDirective(long directiveId) throws ImpalaRuntimeException {
-    LOG.debug("Removing cache directive id: " + directiveId);
+    if (LOG.isDebugEnabled()) LOG.debug("Removing cache directive id: " + directiveId);
     try {
       getDfs().removeCacheDirective(directiveId);
     } catch (IOException e) {
@@ -379,7 +389,9 @@ public class HdfsCachingUtil {
    */
   private static CacheDirectiveEntry getDirective(long directiveId)
       throws ImpalaRuntimeException {
-    LOG.trace("Getting cache directive id: " + directiveId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Getting cache directive id: " + directiveId);
+    }
     CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
         .setId(directiveId)
         .build();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
index 6968f33..95ef700 100644
--- a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
@@ -126,8 +126,10 @@ public class MetaStoreUtil {
   public static List<Partition> fetchPartitionsByName(
       IMetaStoreClient client, List<String> partNames, String dbName, String tblName)
       throws MetaException, TException {
-    LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
-        "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
+          "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
+    }
 
     List<org.apache.hadoop.hive.metastore.api.Partition> fetchedPartitions =
         Lists.newArrayList();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
index c1e8224..94c0c33 100644
--- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
+++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
@@ -274,9 +274,12 @@ public class RequestPoolService {
     JniUtil.deserializeThrift(protocolFactory_, resolvePoolParams,
         thriftResolvePoolParams);
     TResolveRequestPoolResult result = resolveRequestPool(resolvePoolParams);
-    LOG.info("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
-        new Object[] { resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
-                       result.resolved_pool, result.has_access });
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
+          new Object[] {
+            resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
+            result.resolved_pool, result.has_access });
+    }
     try {
       return new TSerializer(protocolFactory_).serialize(result);
     } catch (TException e) {
@@ -372,10 +375,12 @@ public class RequestPoolService {
       result.setDefault_query_options(getLlamaPoolConfigValue(currentLlamaConf, pool,
           QUERY_OPTIONS_KEY, ""));
     }
-    LOG.info("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
-        "max_queued={},  queue_timeout_ms={}, default_query_options={}",
-        new Object[] { pool, result.max_mem_resources, result.max_requests,
-            result.max_queued, result.queue_timeout_ms, result.default_query_options });
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
+          "max_queued={},  queue_timeout_ms={}, default_query_options={}",
+          new Object[] { pool, result.max_mem_resources, result.max_requests,
+              result.max_queued, result.queue_timeout_ms, result.default_query_options });
+    }
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
index 083ad48..a1290cc 100644
--- a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
+++ b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
@@ -110,8 +110,10 @@ public class SentryPolicyService {
    */
   public void dropRole(User requestingUser, String roleName, boolean ifExists)
       throws ImpalaException {
-    LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
+          requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       if (ifExists) {
@@ -139,8 +141,10 @@ public class SentryPolicyService {
    */
   public void createRole(User requestingUser, String roleName, boolean ifNotExists)
       throws ImpalaException {
-    LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
+          requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().createRole(requestingUser.getShortName(), roleName);
@@ -167,8 +171,10 @@ public class SentryPolicyService {
    */
   public void grantRoleToGroup(User requestingUser, String roleName, String groupName)
       throws ImpalaException {
-    LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
+          roleName, groupName, requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().grantRoleToGroup(requestingUser.getShortName(), groupName, roleName);
@@ -193,8 +199,10 @@ public class SentryPolicyService {
    */
   public void revokeRoleFromGroup(User requestingUser, String roleName, String groupName)
       throws ImpalaException {
-    LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
+          roleName, groupName, requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().revokeRoleFromGroup(requestingUser.getShortName(),
@@ -231,9 +239,12 @@ public class SentryPolicyService {
     Preconditions.checkState(!privileges.isEmpty());
     TPrivilege privilege = privileges.get(0);
     TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
-        roleName, privilege.getPrivilege_level().toString(), scope.toString(),
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format(
+          "Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
+          roleName, privilege.getPrivilege_level().toString(), scope.toString(),
+          requestingUser.getName()));
+    }
     // Verify that all privileges have the same scope.
     for (int i = 1; i < privileges.size(); ++i) {
       Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
@@ -306,9 +317,11 @@ public class SentryPolicyService {
     Preconditions.checkState(!privileges.isEmpty());
     TPrivilege privilege = privileges.get(0);
     TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
-        "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
-        scope.toString(), requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
+          "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
+          scope.toString(), requestingUser.getName()));
+    }
     // Verify that all privileges have the same scope.
     for (int i = 1; i < privileges.size(); ++i) {
       Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +


[3/5] incubator-impala git commit: IMPALA-4458: Fix resource cleanup of cancelled mt scan nodes.

Posted by kw...@apache.org.
IMPALA-4458: Fix resource cleanup of cancelled mt scan nodes.

The bug was that HdfsScanNodeMt::Close() did not properly
clean up all in-flight resources when called through the
query cancellation path.

The main change is to clean up all resources when a NULL
batch is passed into HdfsParquetScanner::Close(), which also
required similar changes in the scanner context.
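The ownership convention behind the fix can be summarized as: Close(batch)
transfers remaining resources to the batch, while Close(NULL), the
cancellation path, must free them because no batch will ever own them. The
Java sketch below uses hypothetical types purely to illustrate that
convention; the real change is in the C++ diffs that follow.

  import java.util.ArrayList;
  import java.util.List;

  class Buffer {
    void free() { /* release the underlying memory */ }
  }

  class RowBatch {
    private final List<Buffer> owned = new ArrayList<>();
    void acquire(Buffer b) { owned.add(b); }
  }

  class Scanner {
    private final List<Buffer> inFlight = new ArrayList<>();

    void close(RowBatch batch) {
      for (Buffer b : inFlight) {
        if (batch != null) {
          batch.acquire(b);  // normal path: returned rows may still reference this memory
        } else {
          b.free();          // cancellation path: nothing references it, free it now
        }
      }
      inFlight.clear();
    }
  }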

Testing: Ran test_cancellation.py, test_scanners.py and
test_nested_types.py with MT_DOP=3. Added a test query
with a limit that failed before this fix.
A regular private hdfs/core test run succeeded.

Change-Id: Ib32f87b3289ed9e8fc2db0885675845e11207438
Reviewed-on: http://gerrit.cloudera.org:8080/5274
Reviewed-by: Alex Behm <al...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/c97bffcc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/c97bffcc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/c97bffcc

Branch: refs/heads/master
Commit: c97bffcce1e3d053ad6152dae300bf5233507f34
Parents: 6d8fd7e
Author: Alex Behm <al...@cloudera.com>
Authored: Tue Nov 15 18:27:20 2016 -0800
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Dec 1 11:04:48 2016 +0000

----------------------------------------------------------------------
 be/src/exec/hdfs-parquet-scanner.cc             | 25 ++++++++++----------
 be/src/exec/hdfs-scan-node-mt.cc                |  1 +
 be/src/exec/parquet-column-readers.h            | 10 ++++----
 be/src/exec/scanner-context.cc                  | 21 ++++++++--------
 be/src/exec/scanner-context.h                   | 17 ++++++-------
 .../queries/QueryTest/mt-dop-parquet.test       | 14 +++++++++++
 6 files changed, 53 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/be/src/exec/hdfs-parquet-scanner.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/hdfs-parquet-scanner.cc b/be/src/exec/hdfs-parquet-scanner.cc
index 1f8ff1a..f16f9d9 100644
--- a/be/src/exec/hdfs-parquet-scanner.cc
+++ b/be/src/exec/hdfs-parquet-scanner.cc
@@ -229,25 +229,31 @@ Status HdfsParquetScanner::Open(ScannerContext* context) {
 }
 
 void HdfsParquetScanner::Close(RowBatch* row_batch) {
-  if (row_batch != NULL) {
+  if (row_batch != nullptr) {
     FlushRowGroupResources(row_batch);
     row_batch->tuple_data_pool()->AcquireData(template_tuple_pool_.get(), false);
     if (scan_node_->HasRowBatchQueue()) {
       static_cast<HdfsScanNode*>(scan_node_)->AddMaterializedRowBatch(row_batch);
     }
   } else {
-    if (template_tuple_pool_.get() != NULL) template_tuple_pool_->FreeAll();
-    if (!FLAGS_enable_partitioned_hash_join ||
-        !FLAGS_enable_partitioned_aggregation) {
+    if (template_tuple_pool_ != nullptr) template_tuple_pool_->FreeAll();
+    dictionary_pool_.get()->FreeAll();
+    context_->ReleaseCompletedResources(nullptr, true);
+    for (ParquetColumnReader* col_reader: column_readers_) col_reader->Close(nullptr);
+    if (!FLAGS_enable_partitioned_hash_join || !FLAGS_enable_partitioned_aggregation) {
       // With the legacy aggs/joins the tuple ptrs of the scratch batch are allocated
       // from the scratch batch's mem pool. We can get into this case if Open() fails.
       scratch_batch_->mem_pool()->FreeAll();
     }
   }
+  if (level_cache_pool_ != nullptr) {
+    level_cache_pool_->FreeAll();
+    level_cache_pool_.reset();
+  }
 
   // Verify all resources (if any) have been transferred.
-  DCHECK_EQ(template_tuple_pool_.get()->total_allocated_bytes(), 0);
-  DCHECK_EQ(dictionary_pool_.get()->total_allocated_bytes(), 0);
+  DCHECK_EQ(template_tuple_pool_->total_allocated_bytes(), 0);
+  DCHECK_EQ(dictionary_pool_->total_allocated_bytes(), 0);
   DCHECK_EQ(scratch_batch_->mem_pool()->total_allocated_bytes(), 0);
   DCHECK_EQ(context_->num_completed_io_buffers(), 0);
 
@@ -273,12 +279,7 @@ void HdfsParquetScanner::Close(RowBatch* row_batch) {
   if (compression_types.empty()) compression_types.push_back(THdfsCompression::NONE);
   scan_node_->RangeComplete(THdfsFileFormat::PARQUET, compression_types);
 
-  if (level_cache_pool_.get() != NULL) {
-    level_cache_pool_->FreeAll();
-    level_cache_pool_.reset();
-  }
-
-  if (schema_resolver_.get() != NULL) schema_resolver_.reset();
+  if (schema_resolver_.get() != nullptr) schema_resolver_.reset();
 
   for (int i = 0; i < filter_ctxs_.size(); ++i) {
     const FilterStats* stats = filter_ctxs_[i]->stats;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/be/src/exec/hdfs-scan-node-mt.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/hdfs-scan-node-mt.cc b/be/src/exec/hdfs-scan-node-mt.cc
index 84e3eef..bde9f81 100644
--- a/be/src/exec/hdfs-scan-node-mt.cc
+++ b/be/src/exec/hdfs-scan-node-mt.cc
@@ -118,6 +118,7 @@ Status HdfsScanNodeMt::GetNext(RuntimeState* state, RowBatch* row_batch, bool* e
 
 void HdfsScanNodeMt::Close(RuntimeState* state) {
   if (is_closed()) return;
+  if (scanner_.get() != nullptr) scanner_->Close(nullptr);
   scanner_.reset();
   scanner_ctx_.reset();
   HdfsScanNodeBase::Close(state);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/be/src/exec/parquet-column-readers.h
----------------------------------------------------------------------
diff --git a/be/src/exec/parquet-column-readers.h b/be/src/exec/parquet-column-readers.h
index 9eefc1a..85acb88 100644
--- a/be/src/exec/parquet-column-readers.h
+++ b/be/src/exec/parquet-column-readers.h
@@ -227,8 +227,8 @@ class ParquetColumnReader {
   /// Returns true if this column reader has reached the end of the row group.
   inline bool RowGroupAtEnd() { return rep_level_ == HdfsParquetScanner::ROW_GROUP_END; }
 
-  /// Transfers the remaining resources backing tuples to the given row batch,
-  /// and frees up other resources.
+  /// If 'row_batch' is non-NULL, transfers the remaining resources backing tuples to it,
+  /// and frees up other resources. If 'row_batch' is NULL frees all resources instead.
   virtual void Close(RowBatch* row_batch) = 0;
 
  protected:
@@ -343,10 +343,12 @@ class BaseScalarColumnReader : public ParquetColumnReader {
   }
 
   virtual void Close(RowBatch* row_batch) {
-    if (decompressed_data_pool_.get() != NULL) {
+    if (row_batch != nullptr) {
       row_batch->tuple_data_pool()->AcquireData(decompressed_data_pool_.get(), false);
+    } else {
+      decompressed_data_pool_->FreeAll();
     }
-    if (decompressor_.get() != NULL) decompressor_->Close();
+    if (decompressor_ != nullptr) decompressor_->Close();
   }
 
   int64_t total_len() const { return metadata_->total_compressed_size; }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/be/src/exec/scanner-context.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/scanner-context.cc b/be/src/exec/scanner-context.cc
index 8e8e84c..66f112d 100644
--- a/be/src/exec/scanner-context.cc
+++ b/be/src/exec/scanner-context.cc
@@ -92,37 +92,36 @@ ScannerContext::Stream* ScannerContext::AddStream(DiskIoMgr::ScanRange* range) {
 }
 
 void ScannerContext::Stream::ReleaseCompletedResources(RowBatch* batch, bool done) {
-  DCHECK((batch != NULL) || (batch == NULL && !contains_tuple_data_));
+  DCHECK(batch != nullptr || done);
   if (done) {
     // Mark any pending resources as completed
-    if (io_buffer_ != NULL) {
+    if (io_buffer_ != nullptr) {
       ++parent_->num_completed_io_buffers_;
       completed_io_buffers_.push_back(io_buffer_);
     }
-    // Set variables to NULL to make sure streams are not used again
-    io_buffer_ = NULL;
-    io_buffer_pos_ = NULL;
+    // Set variables to nullptr to make sure streams are not used again
+    io_buffer_ = nullptr;
+    io_buffer_pos_ = nullptr;
     io_buffer_bytes_left_ = 0;
     // Cancel the underlying scan range to clean up any queued buffers there
     scan_range_->Cancel(Status::CANCELLED);
   }
 
-  for (list<DiskIoMgr::BufferDescriptor*>::iterator it = completed_io_buffers_.begin();
-       it != completed_io_buffers_.end(); ++it) {
-    if (contains_tuple_data_) {
-      batch->AddIoBuffer(*it);
+  for (DiskIoMgr::BufferDescriptor* buffer: completed_io_buffers_) {
+    if (contains_tuple_data_ && batch != nullptr) {
+      batch->AddIoBuffer(buffer);
       // TODO: We can do row batch compaction here.  This is the only place io buffers are
       // queued.  A good heuristic is to check the number of io buffers queued and if
       // there are too many, we should compact.
     } else {
-      (*it)->Return();
+      buffer->Return();
       parent_->scan_node_->num_owned_io_buffers_.Add(-1);
     }
   }
   parent_->num_completed_io_buffers_ -= completed_io_buffers_.size();
   completed_io_buffers_.clear();
 
-  if (contains_tuple_data_) {
+  if (contains_tuple_data_ && batch != nullptr) {
     // If we're not done, keep using the last chunk allocated in boundary_pool_ so we
     // don't have to reallocate. If we are done, transfer it to the row batch.
     batch->tuple_data_pool()->AcquireData(boundary_pool_.get(), /* keep_current */ !done);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/be/src/exec/scanner-context.h
----------------------------------------------------------------------
diff --git a/be/src/exec/scanner-context.h b/be/src/exec/scanner-context.h
index 1f1bc0d..0f4e36f 100644
--- a/be/src/exec/scanner-context.h
+++ b/be/src/exec/scanner-context.h
@@ -254,9 +254,11 @@ class ScannerContext {
     /// never set to NULL, even if it contains 0 bytes.
     Status GetNextBuffer(int64_t read_past_size = 0);
 
-    /// If 'batch' is not NULL, attaches all completed io buffers and the boundary mem
-    /// pool to batch.  If 'done' is set, releases the completed resources.
-    /// If 'batch' is NULL then contains_tuple_data_ should be false.
+    /// If 'batch' is not NULL and 'contains_tuple_data_' is true, attaches all completed
+    /// io buffers and the boundary mem pool to 'batch'. If 'done' is set, all in-flight
+    /// resources are also attached or released.
+    /// If 'batch' is NULL then 'done' must be true. Such a call will release all
+    /// completed and in-flight resources.
     void ReleaseCompletedResources(RowBatch* batch, bool done);
 
     /// Error-reporting functions.
@@ -275,14 +277,13 @@ class ScannerContext {
   /// from all streams to 'batch'. Attaching only completed resources ensures that buffers
   /// (and their cleanup) trail the rows that reference them (row batches are consumed and
   /// cleaned up in order by the rest of the query).
-  /// If a NULL 'batch' is passed, then it tries to release whatever resource can be
-  /// released, ie. completed io buffers if 'done' is not set, and the mem pool if 'done'
-  /// is set. In that case, contains_tuple_data_ should be false.
-  //
   /// If 'done' is true, this is the final call for the current streams and any pending
   /// resources in each stream are also passed to the row batch. Callers which want to
   /// clear the streams from the context should also call ClearStreams().
-  //
+  ///
+  /// A NULL 'batch' may be passed to free all resources. It is only valid to pass a NULL
+  /// 'batch' when also passing 'done'.
+  ///
   /// This must be called with 'done' set when the scanner is complete and no longer needs
   /// any resources (e.g. tuple memory, io buffers) returned from the current streams.
   /// After calling with 'done' set, this should be called again if new streams are

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c97bffcc/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet.test b/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet.test
index cbd14ef..0523f1d 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet.test
@@ -24,3 +24,17 @@ limit 10
 ---- TYPES
 bigint,bigint
 ====
+---- QUERY
+# IMPALA-4458: Test proper resource cleanup for cancelled fragments.
+# This test is duplicated from nested-types-subplan.test
+select c_custkey, c_mktsegment, o_orderkey, o_orderdate
+from tpch_nested_parquet.customer c, c.c_orders o
+where c_custkey = 1
+limit 3
+---- RESULTS
+1,regex:.*,regex:.*,regex:.*
+1,regex:.*,regex:.*,regex:.*
+1,regex:.*,regex:.*,regex:.*
+---- TYPES
+bigint,string,bigint,string
+====


[5/5] incubator-impala git commit: IMPALA-4557: Fix flakiness with FLAGS_stress_free_pool_alloc

Posted by kw...@apache.org.
IMPALA-4557: Fix flakiness with FLAGS_stress_free_pool_alloc

FLAGS_stress_free_pool_alloc is a gflag for simulating malloc
failure in debug builds. If set, FreePool::Allocate() and
FreePool::Reallocate() will return NULL once every
FLAGS_stress_free_pool_alloc allocations. The problem is that
the counter tracking the number of allocations is updated
racily by multiple threads, leading to non-determinism and
flaky tests. This change fixes the problem by making the counter
an AtomicInt32.
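
The fix itself is C++ (see the diff below), but the race and its repair can
be sketched in Java with java.util.concurrent.atomic: a bare increment on a
shared counter is a read-modify-write that can lose updates, while an atomic
increment cannot. The class below is a standalone illustration, not Impala
code.

  import java.util.concurrent.atomic.AtomicInteger;

  public class StressCounterExample {
    // Racy: ++ on a plain shared field can lose updates under contention.
    static int racyCount = 0;
    // Safe: incrementAndGet() performs the whole update atomically.
    static final AtomicInteger atomicCount = new AtomicInteger(0);

    public static void main(String[] args) throws InterruptedException {
      Runnable work = () -> {
        for (int i = 0; i < 100_000; ++i) {
          ++racyCount;
          atomicCount.incrementAndGet();
        }
      };
      Thread t1 = new Thread(work);
      Thread t2 = new Thread(work);
      t1.start(); t2.start();
      t1.join(); t2.join();
      // racyCount usually ends up below 200000; atomicCount is always 200000.
      System.out.println("racy=" + racyCount + " atomic=" + atomicCount.get());
    }
  }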

Change-Id: I373035d209a0f3477650336ee6da458fa289dbe0
Reviewed-on: http://gerrit.cloudera.org:8080/5281
Reviewed-by: Jim Apple <jb...@apache.org>
Reviewed-by: Dan Hecht <dh...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/b1edca2a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/b1edca2a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/b1edca2a

Branch: refs/heads/master
Commit: b1edca2a5b6eb66fc5c316157f968b267ee36b48
Parents: 467642f
Author: Michael Ho <kw...@cloudera.com>
Authored: Tue Nov 29 15:27:56 2016 -0800
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Dec 1 12:03:17 2016 +0000

----------------------------------------------------------------------
 be/src/common/atomic.h        |  5 +++--
 be/src/runtime/CMakeLists.txt |  1 +
 be/src/runtime/free-pool.cc   | 28 ++++++++++++++++++++++++++++
 be/src/runtime/free-pool.h    | 13 +++++++++----
 4 files changed, 41 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b1edca2a/be/src/common/atomic.h
----------------------------------------------------------------------
diff --git a/be/src/common/atomic.h b/be/src/common/atomic.h
index 1afdedf..784be11 100644
--- a/be/src/common/atomic.h
+++ b/be/src/common/atomic.h
@@ -71,8 +71,9 @@ template<typename T>
 class AtomicInt {
  public:
   AtomicInt(T initial = 0) : value_(initial) {
-    DCHECK(sizeof(T) == sizeof(base::subtle::Atomic32) ||
-        sizeof(T) == sizeof(base::subtle::Atomic64));
+    static_assert(sizeof(T) == sizeof(base::subtle::Atomic32) ||
+        sizeof(T) == sizeof(base::subtle::Atomic64),
+            "Only AtomicInt32 and AtomicInt64 are implemented");
   }
 
   /// Atomic load with "acquire" memory-ordering semantic.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b1edca2a/be/src/runtime/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/be/src/runtime/CMakeLists.txt b/be/src/runtime/CMakeLists.txt
index 5114ff9..6602f07 100644
--- a/be/src/runtime/CMakeLists.txt
+++ b/be/src/runtime/CMakeLists.txt
@@ -37,6 +37,7 @@ add_library(Runtime
   disk-io-mgr-scan-range.cc
   disk-io-mgr-stress.cc
   exec-env.cc
+  free-pool.cc
   hbase-table.cc
   hbase-table-factory.cc
   hdfs-fs-cache.cc

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b1edca2a/be/src/runtime/free-pool.cc
----------------------------------------------------------------------
diff --git a/be/src/runtime/free-pool.cc b/be/src/runtime/free-pool.cc
new file mode 100644
index 0000000..df0569d
--- /dev/null
+++ b/be/src/runtime/free-pool.cc
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "runtime/free-pool.h"
+
+#include "common/names.h"
+
+using namespace impala;
+
+#ifndef NDEBUG
+
+AtomicInt32 FreePool::alloc_counts_(0);
+
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b1edca2a/be/src/runtime/free-pool.h
----------------------------------------------------------------------
diff --git a/be/src/runtime/free-pool.h b/be/src/runtime/free-pool.h
index 746b649..98b1ebe 100644
--- a/be/src/runtime/free-pool.h
+++ b/be/src/runtime/free-pool.h
@@ -23,6 +23,7 @@
 #include <string.h>
 #include <string>
 #include <sstream>
+#include "common/atomic.h"
 #include "common/logging.h"
 #include "gutil/bits.h"
 #include "runtime/mem-pool.h"
@@ -60,9 +61,8 @@ class FreePool {
   uint8_t* Allocate(int64_t size) {
     DCHECK_GE(size, 0);
 #ifndef NDEBUG
-    static int32_t alloc_counts = 0;
     if (FLAGS_stress_free_pool_alloc > 0 &&
-        (++alloc_counts % FLAGS_stress_free_pool_alloc) == 0) {
+        (alloc_counts_.Add(1) % FLAGS_stress_free_pool_alloc) == 0) {
       return NULL;
     }
 #endif
@@ -121,9 +121,8 @@ class FreePool {
   /// free the memory buffer pointed to by "ptr" in this case.
   uint8_t* Reallocate(uint8_t* ptr, int64_t size) {
 #ifndef NDEBUG
-    static int32_t alloc_counts = 0;
     if (FLAGS_stress_free_pool_alloc > 0 &&
-        (++alloc_counts % FLAGS_stress_free_pool_alloc) == 0) {
+        (alloc_counts_.Add(1) % FLAGS_stress_free_pool_alloc) == 0) {
       return NULL;
     }
 #endif
@@ -209,6 +208,12 @@ class FreePool {
 
   /// Diagnostic counter that tracks (# Allocates - # Frees)
   int64_t net_allocations_;
+
+#ifndef NDEBUG
+  /// Counter for tracking the number of allocations. Used only if the
+  /// stress flag FLAGS_stress_free_pool_alloc is set.
+  static AtomicInt32 alloc_counts_;
+#endif
 };
 
 }


[4/5] incubator-impala git commit: Bracketing Java logging output with log level checks part 2.

Posted by kw...@apache.org.
Bracketing Java logging output with log level checks part 2.

This reduces creation of intermediate objects and improves performance.
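
As an illustration only (the class below is a made-up example, not part of
the Impala sources), the pattern this change applies looks as follows:
without the guard, the argument string is concatenated and any expensive
debugString()-style calls run even when DEBUG logging is disabled; with the
guard, that work is skipped entirely.

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LogGuardExample {
    private static final Logger LOG = LoggerFactory.getLogger(LogGuardExample.class);

    // Stands in for calls like debugString() that build large intermediate strings.
    private static String expensiveDebugString() {
      return "state=" + System.nanoTime();
    }

    public static void main(String[] args) {
      // Before: the argument is always built, even if DEBUG is off.
      LOG.debug("stats: " + expensiveDebugString());

      // After: the concatenation only happens when DEBUG is enabled.
      if (LOG.isDebugEnabled()) {
        LOG.debug("stats: " + expensiveDebugString());
      }
    }
  }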

Change-Id: Ib94b3a20d439d854f579d9086755eb19699fcb68
Reviewed-on: http://gerrit.cloudera.org:8080/5297
Reviewed-by: Marcel Kornacker <ma...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/467642f2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/467642f2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/467642f2

Branch: refs/heads/master
Commit: 467642f2cab9c341aba5955d8bfd88565649b3ff
Parents: c97bffc
Author: Alex Behm <al...@cloudera.com>
Authored: Wed Nov 30 17:39:54 2016 -0800
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Dec 1 11:10:02 2016 +0000

----------------------------------------------------------------------
 .../apache/impala/analysis/AggregateInfo.java   | 23 +++++---
 .../impala/analysis/AggregateInfoBase.java      | 12 ++--
 .../apache/impala/analysis/AlterTableStmt.java  |  6 +-
 .../apache/impala/analysis/AnalysisContext.java |  6 +-
 .../apache/impala/analysis/AnalyticInfo.java    | 10 ++--
 .../org/apache/impala/analysis/Analyzer.java    | 59 ++++++++++----------
 .../impala/analysis/ComputeStatsStmt.java       | 20 ++++---
 .../impala/analysis/CreateDropRoleStmt.java     |  4 +-
 .../analysis/CreateOrAlterViewStmtBase.java     |  3 +-
 .../apache/impala/analysis/CreateUdfStmt.java   |  5 +-
 .../impala/analysis/DropFunctionStmt.java       |  5 +-
 .../impala/analysis/ExprSubstitutionMap.java    |  4 +-
 .../apache/impala/analysis/InlineViewRef.java   |  8 ++-
 .../org/apache/impala/analysis/LiteralExpr.java |  2 +-
 .../apache/impala/analysis/LoadDataStmt.java    |  8 +--
 .../org/apache/impala/analysis/ModifyStmt.java  |  1 -
 .../apache/impala/analysis/PartitionSpec.java   |  7 +--
 .../impala/analysis/ResetMetadataStmt.java      |  2 +-
 .../org/apache/impala/analysis/SelectStmt.java  | 47 ++++++++++------
 .../apache/impala/analysis/ShowFilesStmt.java   |  8 +--
 .../apache/impala/analysis/StmtRewriter.java    |  2 +-
 .../apache/impala/analysis/TruncateStmt.java    |  8 +--
 .../org/apache/impala/analysis/UnionStmt.java   |  4 +-
 .../authorization/AuthorizationChecker.java     |  8 +--
 .../impala/authorization/AuthorizeableDb.java   |  2 +-
 .../impala/authorization/AuthorizeableUri.java  |  2 +-
 .../authorization/ImpalaInternalAdminUser.java  |  2 +-
 .../apache/impala/authorization/Privilege.java  |  2 +-
 .../impala/authorization/PrivilegeRequest.java  |  2 +-
 .../org/apache/impala/authorization/User.java   | 11 ++--
 30 files changed, 148 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
index 8590872..1ce5833 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
@@ -20,14 +20,13 @@ package org.apache.impala.analysis;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.planner.DataPartition;
-import org.apache.impala.thrift.TPartitionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -181,7 +180,7 @@ public class AggregateInfo extends AggregateInfoBase {
       Preconditions.checkState(tupleDesc == null);
       result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
     }
-    LOG.debug("agg info:\n" + result.debugString());
+    if (LOG.isDebugEnabled()) LOG.debug("agg info:\n" + result.debugString());
     return result;
   }
 
@@ -338,7 +337,9 @@ public class AggregateInfo extends AggregateInfoBase {
 
     // Preserve the root type for NULL literals.
     groupingExprs_ = Expr.substituteList(groupingExprs_, smap, analyzer, true);
-    LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
+    }
 
     // The smap in this case should not substitute the aggs themselves, only
     // their subexpressions.
@@ -349,7 +350,9 @@ public class AggregateInfo extends AggregateInfoBase {
       aggregateExprs_.add((FunctionCallExpr) substitutedAgg);
     }
 
-    LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
+    }
     outputTupleSmap_.substituteLhs(smap, analyzer);
     intermediateTupleSmap_.substituteLhs(smap, analyzer);
     if (secondPhaseDistinctAggInfo_ != null) {
@@ -595,8 +598,10 @@ public class AggregateInfo extends AggregateInfoBase {
     }
     if (!requiresIntermediateTuple()) intermediateTupleSmap_ = outputTupleSmap_;
 
-    LOG.trace("output smap=" + outputTupleSmap_.debugString());
-    LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("output smap=" + outputTupleSmap_.debugString());
+      LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
index 51c56ae..d6ef084 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
@@ -20,12 +20,11 @@ package org.apache.impala.analysis;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.Type;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.impala.catalog.AggregateFunction;
-import org.apache.impala.catalog.ColumnStats;
-import org.apache.impala.catalog.Type;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -169,8 +168,11 @@ public abstract class AggregateInfoBase {
         }
       }
     }
-    String prefix = (isOutputTuple ? "result " : "intermediate ");
-    LOG.trace(prefix + " tuple=" + result.debugString());
+
+    if (LOG.isTraceEnabled()) {
+      String prefix = (isOutputTuple ? "result " : "intermediate ");
+      LOG.trace(prefix + "tuple=" + result.debugString());
+    }
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index 967838e..6c7530a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -17,18 +17,14 @@
 
 package org.apache.impala.analysis;
 
-import java.util.List;
-
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.DataSourceTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TTableName;
+
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 
 /**
  * Base class for all ALTER TABLE statements.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
index a7f259d..e20fd34 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 /**
- * Wrapper class for parser and analyzer.
+ * Wrapper class for parsing, analyzing and rewriting a SQL stmt.
  */
 public class AnalysisContext {
   private final static Logger LOG = LoggerFactory.getLogger(AnalysisContext.class);
@@ -411,7 +411,9 @@ public class AnalysisContext {
         // Restore the original result types and column labels.
         analysisResult_.stmt_.castResultExprs(origResultTypes);
         analysisResult_.stmt_.setColLabels(origColLabels);
-        LOG.trace("rewrittenStmt: " + analysisResult_.stmt_.toSql());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("rewrittenStmt: " + analysisResult_.stmt_.toSql());
+        }
         if (isExplain) analysisResult_.stmt_.setIsExplain();
         Preconditions.checkState(!analysisResult_.requiresSubqueryRewrite());
       }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
index cff6f35..d37152a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
@@ -20,10 +20,10 @@ package org.apache.impala.analysis;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.impala.catalog.Type;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.impala.catalog.Type;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -97,9 +97,11 @@ public class AnalyticInfo extends AggregateInfoBase {
       result.outputTupleDesc_.getSlots().get(i).setSourceExpr(
           result.analyticExprs_.get(i));
     }
-    LOG.trace("analytictuple=" + result.outputTupleDesc_.debugString());
-    LOG.trace("analytictuplesmap=" + result.analyticTupleSmap_.debugString());
-    LOG.trace("analytic info:\n" + result.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("analytictuple=" + result.outputTupleDesc_.debugString());
+      LOG.trace("analytictuplesmap=" + result.analyticTupleSmap_.debugString());
+      LOG.trace("analytic info:\n" + result.debugString());
+    }
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index 3ab367c..d4fe88d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -51,7 +51,6 @@ import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.common.Id;
 import org.apache.impala.common.IdGenerator;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
@@ -605,8 +604,10 @@ public class Analyzer {
       globalState_.fullOuterJoinedConjuncts.put(e.getId(), currentOuterJoin);
       break;
     }
-    LOG.trace("registerFullOuterJoinedConjunct: " +
-        globalState_.fullOuterJoinedConjuncts.toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("registerFullOuterJoinedConjunct: " +
+          globalState_.fullOuterJoinedConjuncts.toString());
+    }
   }
 
   /**
@@ -617,8 +618,10 @@ public class Analyzer {
     for (TupleId tid: tids) {
       globalState_.fullOuterJoinedTupleIds.put(tid, rhsRef);
     }
-    LOG.trace("registerFullOuterJoinedTids: " +
-        globalState_.fullOuterJoinedTupleIds.toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("registerFullOuterJoinedTids: " +
+          globalState_.fullOuterJoinedTupleIds.toString());
+    }
   }
 
   /**
@@ -628,7 +631,10 @@ public class Analyzer {
     for (TupleId tid: tids) {
       globalState_.outerJoinedTupleIds.put(tid, rhsRef);
     }
-    LOG.trace("registerOuterJoinedTids: " + globalState_.outerJoinedTupleIds.toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("registerOuterJoinedTids: " +
+          globalState_.outerJoinedTupleIds.toString());
+    }
   }
 
   /**
@@ -1086,8 +1092,10 @@ public class Analyzer {
     // register single tid conjuncts
     if (tupleIds.size() == 1) globalState_.singleTidConjuncts.add(e.getId());
 
-    LOG.trace("register tuple/slotConjunct: " + Integer.toString(e.getId().asInt())
-        + " " + e.toSql() + " " + e.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("register tuple/slotConjunct: " + Integer.toString(e.getId().asInt())
+      + " " + e.toSql() + " " + e.debugString());
+    }
 
     if (!(e instanceof BinaryPredicate)) return;
     BinaryPredicate binaryPred = (BinaryPredicate) e;
@@ -1138,7 +1146,9 @@ public class Analyzer {
     // create an eq predicate between lhs and rhs
     BinaryPredicate p = new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs);
     p.setIsAuxExpr();
-    LOG.trace("register equiv predicate: " + p.toSql() + " " + p.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("register equiv predicate: " + p.toSql() + " " + p.debugString());
+    }
     registerConjunct(p);
   }
 
@@ -1162,7 +1172,6 @@ public class Analyzer {
    */
   public List<Expr> getUnassignedConjuncts(
       List<TupleId> tupleIds, boolean inclOjConjuncts) {
-    LOG.trace("getUnassignedConjuncts for " + Id.printIds(tupleIds));
     List<Expr> result = Lists.newArrayList();
     for (Expr e: globalState_.conjuncts.values()) {
       if (e.isBoundByTupleIds(tupleIds)
@@ -1171,7 +1180,6 @@ public class Analyzer {
           && ((inclOjConjuncts && !e.isConstant())
               || !globalState_.ojClauseByConjunct.containsKey(e.getId()))) {
         result.add(e);
-        LOG.trace("getUnassignedConjunct: " + e.toSql());
       }
     }
     return result;
@@ -1207,13 +1215,9 @@ public class Analyzer {
    * Outer Join clause.
    */
   public List<Expr> getUnassignedConjuncts(List<TupleId> tupleIds) {
-    LOG.trace("getUnassignedConjuncts for node with " + Id.printIds(tupleIds));
     List<Expr> result = Lists.newArrayList();
     for (Expr e: getUnassignedConjuncts(tupleIds, true)) {
-      if (canEvalPredicate(tupleIds, e)) {
-        result.add(e);
-        LOG.trace("getUnassignedConjunct: " + e.toSql());
-      }
+      if (canEvalPredicate(tupleIds, e)) result.add(e);
     }
     return result;
   }
@@ -1249,7 +1253,6 @@ public class Analyzer {
         Expr e = globalState_.conjuncts.get(conjunctId);
         Preconditions.checkNotNull(e);
         result.add(e);
-        LOG.trace("getUnassignedOjConjunct: " + e.toSql());
       }
     }
     return result;
@@ -1343,8 +1346,6 @@ public class Analyzer {
    *   referenced tids the last join to outer-join this tid has been materialized
    */
   public boolean canEvalPredicate(List<TupleId> tupleIds, Expr e) {
-    LOG.trace("canEval: " + e.toSql() + " " + e.debugString() + " "
-        + Id.printIds(tupleIds));
     if (!e.isBoundByTupleIds(tupleIds)) return false;
     ArrayList<TupleId> tids = Lists.newArrayList();
     e.getIds(tids, null);
@@ -1399,14 +1400,10 @@ public class Analyzer {
     if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
 
     for (TupleId tid: tids) {
-      LOG.trace("canEval: checking tid " + tid.toString());
       TableRef rhsRef = getLastOjClause(tid);
       // this is not outer-joined; ignore
       if (rhsRef == null) continue;
       // check whether the last join to outer-join 'tid' is materialized by tupleIds
-      boolean contains = tupleIds.containsAll(rhsRef.getAllTableRefIds());
-      LOG.trace("canEval: contains=" + (contains ? "true " : "false ")
-          + Id.printIds(tupleIds) + " " + Id.printIds(rhsRef.getAllTableRefIds()));
       if (!tupleIds.containsAll(rhsRef.getAllTableRefIds())) return false;
     }
     return true;
@@ -1525,7 +1522,9 @@ public class Analyzer {
           // to prevent callers from inadvertently marking the srcConjunct as assigned.
           p.setId(null);
           if (p instanceof BinaryPredicate) ((BinaryPredicate) p).setIsInferred();
-          LOG.trace("new pred: " + p.toSql() + " " + p.debugString());
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("new pred: " + p.toSql() + " " + p.debugString());
+          }
         }
 
         if (markAssigned) {
@@ -2033,7 +2032,6 @@ public class Analyzer {
    */
   public List<SlotId> getEquivSlots(SlotId slotId, List<TupleId> tupleIds) {
     List<SlotId> result = Lists.newArrayList();
-    LOG.trace("getequivslots: slotid=" + Integer.toString(slotId.asInt()));
     EquivalenceClassId classId = globalState_.equivClassBySlotId.get(slotId);
     for (SlotId memberId: globalState_.equivClassMembers.get(classId)) {
       if (tupleIds.contains(
@@ -2122,7 +2120,6 @@ public class Analyzer {
     if (conjuncts == null) return;
     for (Expr p: conjuncts) {
       globalState_.assignedConjuncts.add(p.getId());
-      LOG.trace("markAssigned " + p.toSql() + " " + p.debugString());
     }
   }
 
@@ -2130,7 +2127,6 @@ public class Analyzer {
    * Mark predicate as assigned.
    */
   public void markConjunctAssigned(Expr conjunct) {
-    LOG.trace("markAssigned " + conjunct.toSql() + " " + conjunct.debugString());
     globalState_.assignedConjuncts.add(conjunct.getId());
   }
 
@@ -2154,7 +2150,6 @@ public class Analyzer {
       if (globalState_.assignedConjuncts.contains(id)) continue;
       Expr e = globalState_.conjuncts.get(id);
       if (e.isAuxExpr()) continue;
-      LOG.trace("unassigned: " + e.toSql() + " " + e.debugString());
       return true;
     }
     return false;
@@ -2736,7 +2731,9 @@ public class Analyzer {
       }
 
       long end = System.currentTimeMillis();
-      LOG.trace("Time taken in computeValueTransfers(): " + (end - start) + "ms");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Time taken in computeValueTransfers(): " + (end - start) + "ms");
+      }
     }
 
     /**
@@ -2841,7 +2838,9 @@ public class Analyzer {
           // scope of the source slot and the receiving slot's block has a limit
           Analyzer firstBlock = globalState_.blockBySlot.get(slotIds.first);
           Analyzer secondBlock = globalState_.blockBySlot.get(slotIds.second);
-          LOG.trace("value transfer: from " + slotIds.first.toString());
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("value transfer: from " + slotIds.first.toString());
+          }
           Pair<SlotId, SlotId> firstToSecond = null;
           Pair<SlotId, SlotId> secondToFirst = null;
           if (!(secondBlock.hasLimitOffsetClause_ &&

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 7aface4..298a17d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -336,7 +336,9 @@ public class ComputeStatsStmt extends StatementBase {
           if (p.isDefaultPartition()) continue;
           TPartitionStats partStats = p.getPartitionStats();
           if (!p.hasIncrementalStats() || tableIsMissingColStats) {
-            if (partStats == null) LOG.trace(p.toString() + " does not have stats");
+            if (partStats == null) {
+              if (LOG.isTraceEnabled()) LOG.trace(p.toString() + " does not have stats");
+            }
             if (!tableIsMissingColStats) filterPreds.add(p.getConjunctSql());
             List<String> partValues = Lists.newArrayList();
             for (LiteralExpr partValue: p.getPartitionValues()) {
@@ -345,7 +347,7 @@ public class ComputeStatsStmt extends StatementBase {
             }
             expectedPartitions_.add(partValues);
           } else {
-            LOG.trace(p.toString() + " does have statistics");
+            if (LOG.isTraceEnabled()) LOG.trace(p.toString() + " does have statistics");
             validPartStats_.add(partStats);
           }
         }
@@ -377,7 +379,9 @@ public class ComputeStatsStmt extends StatementBase {
       }
 
       if (filterPreds.size() == 0 && validPartStats_.size() != 0) {
-        LOG.info("No partitions selected for incremental stats update");
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("No partitions selected for incremental stats update");
+        }
         analyzer.addWarning("No partitions selected for incremental stats update");
         return;
       }
@@ -438,18 +442,20 @@ public class ComputeStatsStmt extends StatementBase {
     }
 
     tableStatsQueryStr_ = tableStatsQueryBuilder.toString();
-    LOG.debug("Table stats query: " + tableStatsQueryStr_);
+    if (LOG.isDebugEnabled()) LOG.debug("Table stats query: " + tableStatsQueryStr_);
 
     if (columnStatsSelectList.isEmpty()) {
       // Table doesn't have any columns that we can compute stats for.
-      LOG.info("No supported column types in table " + table_.getTableName() +
-          ", no column statistics will be gathered.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No supported column types in table " + table_.getTableName() +
+            ", no column statistics will be gathered.");
+      }
       columnStatsQueryStr_ = null;
       return;
     }
 
     columnStatsQueryStr_ = columnStatsQueryBuilder.toString();
-    LOG.debug("Column stats query: " + columnStatsQueryStr_);
+    if (LOG.isDebugEnabled()) LOG.debug("Column stats query: " + columnStatsQueryStr_);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
index 3704e97..2007509 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
@@ -20,6 +20,7 @@ package org.apache.impala.analysis;
 import org.apache.impala.catalog.Role;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TCreateDropRoleParams;
+
 import com.google.common.base.Preconditions;
 
 /**
@@ -29,9 +30,6 @@ public class CreateDropRoleStmt extends AuthorizationStmt {
   private final String roleName_;
   private final boolean isDropRole_;
 
-  // Set in analysis
-  private String user_;
-
   public CreateDropRoleStmt(String roleName, boolean isDropRole) {
     Preconditions.checkNotNull(roleName);
     roleName_ = roleName;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
index 5f524f5..1fba940 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
@@ -18,7 +18,6 @@
 package org.apache.impala.analysis;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -167,7 +166,7 @@ public abstract class CreateOrAlterViewStmtBase extends StatementBase {
     }
     graph.addTargetColumnLabels(colDefs);
     graph.computeLineageGraph(viewDefStmt_.getResultExprs(), analyzer);
-    LOG.trace("lineage: " + graph.debugString());
+    if (LOG.isTraceEnabled()) LOG.trace("lineage: " + graph.debugString());
   }
 
   public TCreateOrAlterViewParams toThrift() {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
index 4d5f3ed..644eb7c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
@@ -19,19 +19,16 @@ package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 
-import org.apache.impala.catalog.Db;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.PrimitiveType;
 import org.apache.impala.catalog.ScalarFunction;
-import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.hive.executor.UdfExecutor.JavaUdfDataType;
 import org.apache.impala.thrift.TFunctionBinaryType;
-import org.apache.impala.thrift.TFunctionCategory;
 import org.apache.impala.thrift.TSymbolType;
+
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
index e8f6666..9d9dd80 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
@@ -17,6 +17,8 @@
 
 package org.apache.impala.analysis;
 
+import java.util.ArrayList;
+
 import org.apache.impala.authorization.AuthorizeableFn;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
@@ -25,9 +27,6 @@ import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TDropFunctionParams;
-import org.apache.impala.thrift.TFunctionCategory;
-
-import java.util.ArrayList;
 
 /**
  * Represents a DROP [IF EXISTS] FUNCTION statement

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
index a2de730..83e227e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
@@ -156,7 +156,9 @@ public final class ExprSubstitutionMap {
     for (int i = 0; i < lhs_.size(); ++i) {
       for (int j = i + 1; j < lhs_.size(); ++j) {
         if (lhs_.get(i).equals(lhs_.get(j))) {
-          LOG.info("verify: smap=" + this.debugString());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("verify: smap=" + this.debugString());
+          }
           Preconditions.checkState(false);
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index 42f9400..377ee1f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -207,9 +207,11 @@ public class InlineViewRef extends TableRef {
         analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), colExpr.clone());
       }
     }
-    LOG.trace("inline view " + getUniqueAlias() + " smap: " + smap_.debugString());
-    LOG.trace("inline view " + getUniqueAlias() + " baseTblSmap: " +
-        baseTblSmap_.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("inline view " + getUniqueAlias() + " smap: " + smap_.debugString());
+      LOG.trace("inline view " + getUniqueAlias() + " baseTblSmap: " +
+          baseTblSmap_.debugString());
+    }
 
     analyzeHints(analyzer);
     // Now do the remaining join analysis

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java b/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
index c1d41cb..a4052c6 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
@@ -172,7 +172,7 @@ public abstract class LiteralExpr extends Expr implements Comparable<LiteralExpr
     try {
       val = FeSupport.EvalConstExpr(constExpr, queryCtx);
     } catch (InternalException e) {
-      LOG.debug(String.format("Failed to evaluate expr '%s'",
+      LOG.error(String.format("Failed to evaluate expr '%s': %s",
           constExpr.toSql(), e.getMessage()));
       return null;
     }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index b995cf0..3357df9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -22,11 +22,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsPartition;
@@ -37,11 +36,12 @@ import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.thrift.TLoadDataReq;
 import org.apache.impala.thrift.TTableName;
-import org.apache.impala.util.TAccessLevelUtil;
 import org.apache.impala.util.FsPermissionChecker;
+import org.apache.impala.util.TAccessLevelUtil;
+
 import com.google.common.base.Preconditions;
 
-/*
+/**
  * Represents a LOAD DATA statement for moving data into an existing table:
  * LOAD DATA INPATH 'filepath' [OVERWRITE] INTO TABLE <table name>
  * [PARTITION (partcol1=val1, partcol2=val2 ...)]

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
index f4a48b2..8463b31 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
@@ -25,7 +25,6 @@ import java.util.HashSet;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Table;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
index b89e586..c131f84 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
@@ -21,21 +21,18 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TPartitionKeyValue;
+
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
-/*
+/**
  * Represents a partition spec - a collection of partition key/values.
  */
 public class PartitionSpec extends PartitionSpecBase {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java b/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
index 9d2249b..f1c95ab 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
@@ -23,8 +23,8 @@ import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TResetMetadataRequest;
 import org.apache.impala.thrift.TTableName;
+
 import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
 
 /**
  * Representation of a REFRESH/INVALIDATE METADATA statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index d3dc8d2..264548c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -272,7 +272,9 @@ public class SelectStmt extends QueryStmt {
       graph.addDependencyPredicates(sortInfo_.getOrderingExprs());
     }
 
-    if (aggInfo_ != null) LOG.debug("post-analysis " + aggInfo_.debugString());
+    if (aggInfo_ != null) {
+      if (LOG.isDebugEnabled()) LOG.debug("post-analysis " + aggInfo_.debugString());
+    }
   }
 
   /**
@@ -397,9 +399,11 @@ public class SelectStmt extends QueryStmt {
     }
     baseTblResultExprs_ =
         Expr.trySubstituteList(resultExprs_, baseTblSmap_, analyzer, false);
-    LOG.trace("baseTblSmap_: " + baseTblSmap_.debugString());
-    LOG.trace("resultExprs: " + Expr.debugString(resultExprs_));
-    LOG.trace("baseTblResultExprs: " + Expr.debugString(baseTblResultExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("baseTblSmap_: " + baseTblSmap_.debugString());
+      LOG.trace("resultExprs: " + Expr.debugString(resultExprs_));
+      LOG.trace("baseTblResultExprs: " + Expr.debugString(baseTblResultExprs_));
+    }
   }
 
   public List<TupleId> getTableRefIds() {
@@ -686,14 +690,18 @@ public class SelectStmt extends QueryStmt {
 
     ExprSubstitutionMap combinedSmap =
         ExprSubstitutionMap.compose(countAllMap, finalAggInfo.getOutputSmap(), analyzer);
-    LOG.trace("combined smap: " + combinedSmap.debugString());
 
     // change select list, having and ordering exprs to point to agg output. We need
     // to reanalyze the exprs at this point.
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
-    LOG.trace("resultexprs: " + Expr.debugString(resultExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("combined smap: " + combinedSmap.debugString());
+      LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+      LOG.trace("resultexprs: " + Expr.debugString(resultExprs_));
+    }
     resultExprs_ = Expr.substituteList(resultExprs_, combinedSmap, analyzer, false);
-    LOG.trace("post-agg selectListExprs: " + Expr.debugString(resultExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("post-agg selectListExprs: " + Expr.debugString(resultExprs_));
+    }
     if (havingPred_ != null) {
       // Make sure the predicate in the HAVING clause does not contain a
       // subquery.
@@ -701,12 +709,16 @@ public class SelectStmt extends QueryStmt {
           Predicates.instanceOf(Subquery.class)));
       havingPred_ = havingPred_.substitute(combinedSmap, analyzer, false);
       analyzer.registerConjuncts(havingPred_, true);
-      LOG.debug("post-agg havingPred: " + havingPred_.debugString());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("post-agg havingPred: " + havingPred_.debugString());
+      }
     }
     if (sortInfo_ != null) {
       sortInfo_.substituteOrderingExprs(combinedSmap, analyzer);
-      LOG.debug("post-agg orderingExprs: " +
-          Expr.debugString(sortInfo_.getOrderingExprs()));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("post-agg orderingExprs: " +
+            Expr.debugString(sortInfo_.getOrderingExprs()));
+      }
     }
 
     // check that all post-agg exprs point to agg output
@@ -855,13 +867,16 @@ public class SelectStmt extends QueryStmt {
     }
     // change select list and ordering exprs to point to analytic output. We need
     // to reanalyze the exprs at this point.
-    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer,
-        false);
-    LOG.trace("post-analytic selectListExprs: " + Expr.debugString(resultExprs_));
+    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer, false);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("post-analytic selectListExprs: " + Expr.debugString(resultExprs_));
+    }
     if (sortInfo_ != null) {
       sortInfo_.substituteOrderingExprs(smap, analyzer);
-      LOG.trace("post-analytic orderingExprs: " +
-          Expr.debugString(sortInfo_.getOrderingExprs()));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("post-analytic orderingExprs: " +
+            Expr.debugString(sortInfo_.getOrderingExprs()));
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
index d102bb9..42839dd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
@@ -17,15 +17,13 @@
 
 package org.apache.impala.analysis;
 
-import java.util.List;
-
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TShowFilesParams;
-import org.apache.impala.thrift.TPartitionKeyValue;
-import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.thrift.TTableName;
+
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java b/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
index eccb136..a53ffce 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
@@ -112,7 +112,7 @@ public class StmtRewriter {
       rewriteWhereClauseSubqueries(stmt, analyzer);
     }
     stmt.sqlString_ = null;
-    LOG.trace("rewritten stmt: " + stmt.toSql());
+    if (LOG.isTraceEnabled()) LOG.trace("rewritten stmt: " + stmt.toSql());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
index 0f1c9ce..714e9b1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
@@ -17,19 +17,13 @@
 
 package org.apache.impala.analysis;
 
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.thrift.TTableName;
 import org.apache.impala.thrift.TTruncateParams;
+
 import com.google.common.base.Preconditions;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
 
 /**
  * Representation of a TRUNCATE statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java b/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
index ad9150f..d5f1ef1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
@@ -466,7 +466,9 @@ public class UnionStmt extends QueryStmt {
     TupleDescriptor tupleDesc = analyzer.getDescTbl().createTupleDescriptor("union");
     tupleDesc.setIsMaterialized(true);
     tupleId_ = tupleDesc.getId();
-    LOG.trace("UnionStmt.createMetadata: tupleId=" + tupleId_.toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("UnionStmt.createMetadata: tupleId=" + tupleId_.toString());
+    }
 
     // One slot per expr in the select blocks. Use first select block as representative.
     List<Expr> firstSelectExprs = operands_.get(0).getQueryStmt().getResultExprs();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
index b9a6440..43e86cf 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
@@ -22,6 +22,9 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.lang.reflect.ConstructorUtils;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.AuthorizationPolicy;
+import org.apache.impala.common.InternalException;
 import org.apache.sentry.core.common.ActiveRoleSet;
 import org.apache.sentry.core.common.Subject;
 import org.apache.sentry.core.model.db.DBModelAction;
@@ -33,13 +36,10 @@ import org.apache.sentry.provider.common.ProviderBackendContext;
 import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
 import org.apache.sentry.provider.file.SimpleFileProviderBackend;
 
-import org.apache.impala.catalog.AuthorizationException;
-import org.apache.impala.catalog.AuthorizationPolicy;
-import org.apache.impala.common.InternalException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
-/*
+/**
  * Class used to check whether a user has access to a given resource.
  */
 public class AuthorizationChecker {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
index b358528..1f488f3 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
@@ -24,7 +24,7 @@ import org.apache.sentry.core.model.db.DBModelAuthorizable;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
-/*
+/**
  * Class used to authorize access to a database.
  */
 public class AuthorizeableDb extends Authorizeable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
index ebef8f9..cbc6bf6 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
@@ -22,7 +22,7 @@ import java.util.List;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
-/*
+/**
  * Class used to authorize access to a URI.
  */
 public class AuthorizeableUri extends Authorizeable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
index b68e8ea..edb77a2 100644
--- a/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
@@ -17,7 +17,7 @@
 
 package org.apache.impala.authorization;
 
-/*
+/**
  * A singleton class that represents a special user type used for internal Impala
  * sessions (for example, populating the debug webpage Catalog view). This user has
  * all privileges on all objects in the server.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/Privilege.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/Privilege.java b/fe/src/main/java/org/apache/impala/authorization/Privilege.java
index c087b73..453558b 100644
--- a/fe/src/main/java/org/apache/impala/authorization/Privilege.java
+++ b/fe/src/main/java/org/apache/impala/authorization/Privilege.java
@@ -21,7 +21,7 @@ import java.util.EnumSet;
 
 import org.apache.sentry.core.model.db.DBModelAction;
 
-/*
+/**
  * Maps an Impala Privilege to one or more Hive Access "Actions".
  */
 public enum Privilege {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
index 7dbc23a..d206908 100644
--- a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
+++ b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
@@ -19,7 +19,7 @@ package org.apache.impala.authorization;
 
 import com.google.common.base.Preconditions;
 
-/*
+/**
  * Represents a privilege request in the context of an Authorizeable object. If no
  * Authorizeable object is provided, it represents a privilege request on the server.
  * For example, SELECT on table Foo in database Bar.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/467642f2/fe/src/main/java/org/apache/impala/authorization/User.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/User.java b/fe/src/main/java/org/apache/impala/authorization/User.java
index e29b19f..e699783 100644
--- a/fe/src/main/java/org/apache/impala/authorization/User.java
+++ b/fe/src/main/java/org/apache/impala/authorization/User.java
@@ -17,18 +17,17 @@
 
 package org.apache.impala.authorization;
 
-import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
 
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.RuntimeEnv;
-import org.apache.impala.service.BackendConfig;
 
-import java.io.IOException;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 
-/*
+/**
  * Class that represents a User of an Impala session.
  */
 public class User {
@@ -37,7 +36,7 @@ public class User {
 
   // Refer to BackendConfig.initAuthToLocal() for initialization
   // of static auth_to_local configs in KerberosName class.
-  private KerberosName kerberosName_;
+  private final KerberosName kerberosName_;
 
   public User(String name) {
     Preconditions.checkNotNull(name);


[2/5] incubator-impala git commit: Fix undefined calls to __builtin_ctz.

Posted by kw...@apache.org.
Fix undefined calls to __builtin_ctz.

GCC's __builtin_ctz[l[l]] functions return undefined results when the
argument is 0. This patch handles that 0 case, which could otherwise
produce results that depend on the architecture.
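
For illustration, a minimal standalone sketch of the guard pattern this patch
introduces. The CountTrailingZeros wrapper mirrors the one added to
be/src/util/bit-util.h in the diff below (minus Impala's UNLIKELY hint); the
main() driver is hypothetical and only demonstrates the behavior:

  #include <climits>
  #include <cstdio>

  // Zero-safe wrapper: __builtin_ctz(0) is undefined, so the caller supplies
  // the value to return for a zero argument (by default, the full bit width).
  static inline int CountTrailingZeros(
      unsigned int v, int otherwise = sizeof(unsigned int) * CHAR_BIT) {
    if (v == 0) return otherwise;
    return __builtin_ctz(v);
  }

  int main() {
    printf("%d\n", CountTrailingZeros(8u));     // 3: 0b1000 has 3 trailing zeros
    printf("%d\n", CountTrailingZeros(0u));     // 32 on typical platforms, not UB
    printf("%d\n", CountTrailingZeros(0u, 15)); // 15: caller-chosen fallback, as
                                                // at the PC_BITMAP_LENGTH - 1
                                                // call sites in this patch
    return 0;
  }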

Change-Id: I8460bc3f7e510ce07b8673387c9440accc432abe
Reviewed-on: http://gerrit.cloudera.org:8080/5004
Reviewed-by: Jim Apple <jb...@apache.org>
Reviewed-by: Dan Hecht <dh...@cloudera.com>
Tested-by: Impala Public Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/6d8fd7e7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/6d8fd7e7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/6d8fd7e7

Branch: refs/heads/master
Commit: 6d8fd7e79c44250ccd7fc88ade44b1bdd6ae2528
Parents: 352833b
Author: Jim Apple <jb...@cloudera.com>
Authored: Mon Nov 7 09:36:41 2016 -0800
Committer: Impala Public Jenkins <im...@gerrit.cloudera.org>
Committed: Thu Dec 1 05:22:45 2016 +0000

----------------------------------------------------------------------
 be/src/exprs/aggregate-functions-ir.cc | 25 ++++++++++++-------------
 be/src/udf_samples/hyperloglog-uda.cc  | 16 +++++++++-------
 be/src/util/bit-util.h                 | 25 ++++++++++++++++++++++++-
 3 files changed, 45 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/6d8fd7e7/be/src/exprs/aggregate-functions-ir.cc
----------------------------------------------------------------------
diff --git a/be/src/exprs/aggregate-functions-ir.cc b/be/src/exprs/aggregate-functions-ir.cc
index 090a905..63f06bf 100644
--- a/be/src/exprs/aggregate-functions-ir.cc
+++ b/be/src/exprs/aggregate-functions-ir.cc
@@ -780,8 +780,7 @@ void AggregateFunctions::PcUpdate(FunctionContext* c, const T& input, StringVal*
   // different seed).
   for (int i = 0; i < NUM_PC_BITMAPS; ++i) {
     uint32_t hash_value = AnyValUtil::Hash(input, *c->GetArgType(0), i);
-    int bit_index = __builtin_ctz(hash_value);
-    if (UNLIKELY(hash_value == 0)) bit_index = PC_BITMAP_LENGTH - 1;
+    const int bit_index = BitUtil::CountTrailingZeros(hash_value, PC_BITMAP_LENGTH - 1);
     // Set bitmap[i, bit_index] to 1
     SetDistinctEstimateBit(dst->ptr, i, bit_index);
   }
@@ -798,10 +797,10 @@ void AggregateFunctions::PcsaUpdate(FunctionContext* c, const T& input, StringVa
   uint32_t row_index = hash_value % NUM_PC_BITMAPS;
 
   // We want the zero-based position of the least significant 1-bit in binary
-  // representation of hash_value. __builtin_ctz does exactly this because it returns
-  // the number of trailing 0-bits in x (or undefined if x is zero).
-  int bit_index = __builtin_ctz(hash_value / NUM_PC_BITMAPS);
-  if (UNLIKELY(hash_value == 0)) bit_index = PC_BITMAP_LENGTH - 1;
+  // representation of hash_value. BitUtil::CountTrailingZeros(x,y) does exactly this
+  // because it returns the number of trailing 0-bits in x (or y if x is zero).
+  const int bit_index =
+      BitUtil::CountTrailingZeros(hash_value / NUM_PC_BITMAPS, PC_BITMAP_LENGTH - 1);
 
   // Set bitmap[row_index, bit_index] to 1
   SetDistinctEstimateBit(dst->ptr, row_index, bit_index);
@@ -1185,13 +1184,13 @@ void AggregateFunctions::HllUpdate(FunctionContext* ctx, const T& src, StringVal
   DCHECK_EQ(dst->len, HLL_LEN);
   uint64_t hash_value =
       AnyValUtil::Hash64(src, *ctx->GetArgType(0), HashUtil::FNV64_SEED);
-  if (hash_value != 0) {
-    // Use the lower bits to index into the number of streams and then
-    // find the first 1 bit after the index bits.
-    int idx = hash_value & (HLL_LEN - 1);
-    uint8_t first_one_bit = __builtin_ctzl(hash_value >> HLL_PRECISION) + 1;
-    dst->ptr[idx] = ::max(dst->ptr[idx], first_one_bit);
-  }
+  // Use the lower bits to index into the number of streams and then find the first 1 bit
+  // after the index bits.
+  int idx = hash_value & (HLL_LEN - 1);
+  const uint8_t first_one_bit =
+      1 + BitUtil::CountTrailingZeros(
+              hash_value >> HLL_PRECISION, sizeof(hash_value) * CHAR_BIT - HLL_PRECISION);
+  dst->ptr[idx] = ::max(dst->ptr[idx], first_one_bit);
 }
 
 // Specialize for DecimalVal to allow substituting decimal size.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/6d8fd7e7/be/src/udf_samples/hyperloglog-uda.cc
----------------------------------------------------------------------
diff --git a/be/src/udf_samples/hyperloglog-uda.cc b/be/src/udf_samples/hyperloglog-uda.cc
index c149a0f..d6db712 100644
--- a/be/src/udf_samples/hyperloglog-uda.cc
+++ b/be/src/udf_samples/hyperloglog-uda.cc
@@ -16,6 +16,7 @@
 // under the License.
 
 #include <assert.h>
+#include <limits.h>
 #include <math.h>
 #include <algorithm>
 #include <sstream>
@@ -71,13 +72,14 @@ void HllUpdate(FunctionContext* ctx, const IntVal& src, StringVal* dst) {
   assert(!dst->is_null);
   assert(dst->len == pow(2, HLL_PRECISION));
   uint64_t hash_value = Hash(src);
-  if (hash_value != 0) {
-    // Use the lower bits to index into the number of streams and then
-    // find the first 1 bit after the index bits.
-    int idx = hash_value % dst->len;
-    uint8_t first_one_bit = __builtin_ctzl(hash_value >> HLL_PRECISION) + 1;
-    dst->ptr[idx] = ::max(dst->ptr[idx], first_one_bit);
-  }
+  // Use the lower bits to index into the number of streams and then find the first 1 bit
+  // after the index bits.
+  int idx = hash_value % dst->len;
+  const uint64_t hash_top_bits = hash_value >> HLL_PRECISION;
+  uint8_t first_one_bit =
+      1 + ((hash_top_bits != 0) ? __builtin_ctzll(hash_top_bits) :
+                                  (sizeof(hash_value) * CHAR_BIT - HLL_PRECISION));
+  dst->ptr[idx] = ::max(dst->ptr[idx], first_one_bit);
 }
 
 void HllMerge(FunctionContext* ctx, const StringVal& src, StringVal* dst) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/6d8fd7e7/be/src/util/bit-util.h
----------------------------------------------------------------------
diff --git a/be/src/util/bit-util.h b/be/src/util/bit-util.h
index deb16a9..bc9bcaf 100644
--- a/be/src/util/bit-util.h
+++ b/be/src/util/bit-util.h
@@ -25,9 +25,12 @@
 #include <endian.h>
 #endif
 
-#include <boost/type_traits/make_unsigned.hpp>
+#include <climits>
+
 #include <limits>
 
+#include <boost/type_traits/make_unsigned.hpp>
+
 #include "common/compiler-util.h"
 #include "util/cpu-info.h"
 #include "util/sse-util.h"
@@ -239,6 +242,26 @@ class BitUtil {
   constexpr static T UnsetBit(T v, int bitpos) {
     return v & ~(static_cast<T>(0x1) << bitpos);
   }
+
+  /// Wrappers around __builtin_ctz, which returns an undefined value when the argument is
+  /// zero.
+  static inline int CountTrailingZeros(
+      unsigned int v, int otherwise = sizeof(unsigned int) * CHAR_BIT) {
+    if (UNLIKELY(v == 0)) return otherwise;
+    return __builtin_ctz(v);
+  }
+
+  static inline int CountTrailingZeros(
+      unsigned long v, int otherwise = sizeof(unsigned long) * CHAR_BIT) {
+    if (UNLIKELY(v == 0)) return otherwise;
+    return __builtin_ctzl(v);
+  }
+
+  static inline int CountTrailingZeros(
+      unsigned long long v, int otherwise = sizeof(unsigned long long) * CHAR_BIT) {
+    if (UNLIKELY(v == 0)) return otherwise;
+    return __builtin_ctzll(v);
+  }
 };
 
 /// An encapsulation class of SIMD byteswap functions