Posted to commits@impala.apache.org by jo...@apache.org on 2019/01/10 21:19:54 UTC

[02/10] impala git commit: IMPALA-7867 (Part 4): Collection cleanup in catalog

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/StructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/StructType.java b/fe/src/main/java/org/apache/impala/catalog/StructType.java
index 77d4648..53d8622 100644
--- a/fe/src/main/java/org/apache/impala/catalog/StructType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/StructType.java
@@ -20,23 +20,21 @@ package org.apache.impala.catalog;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.lang3.StringUtils;
-
 import org.apache.impala.thrift.TColumnType;
-import org.apache.impala.thrift.TStructField;
 import org.apache.impala.thrift.TTypeNode;
 import org.apache.impala.thrift.TTypeNodeType;
+
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 /**
  * Describes a STRUCT type. STRUCT types have a list of named struct fields.
  */
 public class StructType extends Type {
-  private final HashMap<String, StructField> fieldMap_ = Maps.newHashMap();
+  private final Map<String, StructField> fieldMap_ = new HashMap<>();
   private final List<StructField> fields_;
 
   public StructType(List<StructField> fields) {
@@ -49,13 +47,13 @@ public class StructType extends Type {
   }
 
   public StructType() {
-    fields_ = Lists.newArrayList();
+    fields_ = new ArrayList<>();
   }
 
   @Override
   public String toSql(int depth) {
     if (depth >= MAX_NESTING_DEPTH) return "STRUCT<...>";
-    ArrayList<String> fieldsSql = Lists.newArrayList();
+    List<String> fieldsSql = new ArrayList<>();
     for (StructField f: fields_) fieldsSql.add(f.toSql(depth + 1));
     return String.format("STRUCT<%s>", Joiner.on(",").join(fieldsSql));
   }
@@ -63,7 +61,7 @@ public class StructType extends Type {
   @Override
   protected String prettyPrint(int lpad) {
     String leftPadding = StringUtils.repeat(' ', lpad);
-    ArrayList<String> fieldsSql = Lists.newArrayList();
+    List<String> fieldsSql = new ArrayList<>();
     for (StructField f: fields_) fieldsSql.add(f.prettyPrint(lpad + 2));
     return String.format("%sSTRUCT<\n%s\n%s>",
         leftPadding, Joiner.on(",\n").join(fieldsSql), leftPadding);
@@ -100,7 +98,7 @@ public class StructType extends Type {
     Preconditions.checkNotNull(fields_);
     Preconditions.checkState(!fields_.isEmpty());
     node.setType(TTypeNodeType.STRUCT);
-    node.setStruct_fields(new ArrayList<TStructField>());
+    node.setStruct_fields(new ArrayList<>());
     for (StructField field: fields_) {
       field.toThrift(container, node);
     }
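
[Editorial note] The StructType.java hunks above are representative of the whole commit: Guava's collection factory methods, which predate Java 7's diamond operator, are replaced with plain constructors, and the now-unused Guava imports are dropped. A minimal standalone sketch of the before/after pattern (class and field names below are illustrative, not taken from the commit):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import com.google.common.collect.Lists;
    import com.google.common.collect.Maps;

    public class DiamondCleanupSketch {
      // Before: Guava factories avoided repeating type arguments on Java 6.
      private final Map<String, Integer> before_ = Maps.newHashMap();
      private final List<String> beforeList_ = Lists.newArrayList();

      // After: the diamond operator infers the type arguments, so the
      // Guava imports become unnecessary.
      private final Map<String, Integer> after_ = new HashMap<>();
      private final List<String> afterList_ = new ArrayList<>();
    }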

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index 97cbd62..f506078 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -19,6 +19,7 @@ package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -52,7 +53,6 @@ import org.apache.log4j.Logger;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 /**
  * Base class for table metadata.
@@ -94,10 +94,10 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
 
   // colsByPos[i] refers to the ith column in the table. The first numClusteringCols are
   // the clustering columns.
-  protected final ArrayList<Column> colsByPos_ = Lists.newArrayList();
+  protected final ArrayList<Column> colsByPos_ = new ArrayList<>();
 
   // map from lowercase column name to Column object.
-  private final Map<String, Column> colsByName_ = Maps.newHashMap();
+  private final Map<String, Column> colsByName_ = new HashMap<>();
 
   // Type of this table (array of struct) that mirrors the columns. Useful for analysis.
   protected final ArrayType type_ = new ArrayType(new StructType());
@@ -135,6 +135,7 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
   }
 
   public ReentrantLock getLock() { return tableLock_; }
+  @Override
   public abstract TTableDescriptor toThriftDescriptor(
       int tableId, Set<Long> referencedPartitions);
 
@@ -214,7 +215,7 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
   // stats. This method allows each table type to volunteer the set of columns we should
   // ask the metastore for in loadAllColumnStats().
   protected List<String> getColumnNamesWithHmsStats() {
-    List<String> ret = Lists.newArrayList();
+    List<String> ret = new ArrayList<>();
     for (String name: colsByName_.keySet()) ret.add(name);
     return ret;
   }
@@ -356,8 +357,8 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
     table.setAccess_level(accessLevel_);
 
     // Populate both regular columns and clustering columns (if there are any).
-    table.setColumns(new ArrayList<TColumn>());
-    table.setClustering_columns(new ArrayList<TColumn>());
+    table.setColumns(new ArrayList<>());
+    table.setClustering_columns(new ArrayList<>());
     for (int i = 0; i < colsByPos_.size(); ++i) {
       TColumn colDesc = colsByPos_.get(i).toThrift();
       // Clustering columns come first.
@@ -453,7 +454,7 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
   }
 
   @Override // FeTable
-  public ArrayList<Column> getColumns() { return colsByPos_; }
+  public List<Column> getColumns() { return colsByPos_; }
 
   @Override // FeTable
   public List<String> getColumnNames() { return Column.toColumnNames(colsByPos_); }
@@ -474,7 +475,7 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
 
   @Override // FeTable
   public List<Column> getColumnsInHiveOrder() {
-    ArrayList<Column> columns = Lists.newArrayList(getNonClusteringColumns());
+    List<Column> columns = Lists.newArrayList(getNonClusteringColumns());
     columns.addAll(getClusteringColumns());
     return Collections.unmodifiableList(columns);
   }
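
[Editorial note] Besides swapping factories, the Table.java hunks narrow concrete collection types in signatures to their interfaces, e.g. getColumns() now returns List<Column> rather than ArrayList<Column>. A hedged sketch of why that matters for callers, using invented names:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class InterfaceReturnSketch {
      private final List<String> cols_ = new ArrayList<>();

      // Declaring the return type as List leaves the implementation free to
      // change later (e.g. return an unmodifiable view or a different List
      // implementation) without breaking callers, which only rely on the
      // List contract rather than on ArrayList specifically.
      public List<String> getColumns() {
        return Collections.unmodifiableList(cols_);
      }
    }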

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
index d07c0ed..b79af30 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
@@ -17,6 +17,7 @@
 
 package org.apache.impala.catalog;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -34,7 +35,6 @@ import org.apache.impala.util.HdfsCachingUtil;
 import org.apache.log4j.Logger;
 
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
 
 /**
 * Class that manages scheduling the loading of table metadata from the Hive Metastore and
@@ -114,13 +114,13 @@ public class TableLoadingMgr {
   // attempts to load the same table by a different thread become no-ops.
   // This map is different from loadingTables_ because the latter tracks all in-flight
   // loads - even those being processed by threads other than table loading threads.
-  private final ConcurrentHashMap<TTableName, AtomicBoolean> tableLoadingBarrier_ =
-      new ConcurrentHashMap<TTableName, AtomicBoolean>();
+  private final Map<TTableName, AtomicBoolean> tableLoadingBarrier_ =
+      new ConcurrentHashMap<>();
 
   // Map of table name to a FutureTask associated with the table load. Used to
   // prevent duplicate loads of the same table.
-  private final ConcurrentHashMap<TTableName, FutureTask<Table>> loadingTables_ =
-      new ConcurrentHashMap<TTableName, FutureTask<Table>>();
+  private final Map<TTableName, FutureTask<Table>> loadingTables_ =
+      new ConcurrentHashMap<>();
 
   // Map of table name to the cache directives that are being waited on for that table.
   // Once all directives have completed, the table's metadata will be refreshed and
@@ -128,7 +128,7 @@ public class TableLoadingMgr {
   // A caching operation may take a long time to complete, so to maximize query
   // throughput it is preferable to allow the user to continue to run queries against
   // the table while a cache request completes in the background.
-  private final Map<TTableName, List<Long>> pendingTableCacheDirs_ = Maps.newHashMap();
+  private final Map<TTableName, List<Long>> pendingTableCacheDirs_ = new HashMap<>();
 
   // The number of parallel threads to use to load table metadata. Should be set to a
   // value that provides good throughput while not putting too much stress on the

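[Editorial note] tableLoadingBarrier_ and loadingTables_ are still constructed as ConcurrentHashMap; only the declared type is widened to Map, so the thread-safety guarantees come from the runtime object, not from the variable's type. A small illustrative sketch (names here are hypothetical):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class ConcurrentFieldSketch {
      // The field is typed as Map for flexibility; the ConcurrentHashMap
      // instance still provides thread-safe access, and putIfAbsent has been
      // part of the Map interface since Java 8, so it remains callable here.
      private final Map<String, AtomicBoolean> barrier_ = new ConcurrentHashMap<>();

      public boolean tryAcquire(String tableName) {
        AtomicBoolean existing =
            barrier_.putIfAbsent(tableName, new AtomicBoolean(true));
        return existing == null;  // true if this caller installed the barrier first
      }
    }
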
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/TopicUpdateLog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TopicUpdateLog.java b/fe/src/main/java/org/apache/impala/catalog/TopicUpdateLog.java
index 9d23c4f..779d8f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TopicUpdateLog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TopicUpdateLog.java
@@ -17,10 +17,11 @@
 
 package org.apache.impala.catalog;
 
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.log4j.Logger;
+
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
@@ -90,7 +91,7 @@ public class TopicUpdateLog {
 
   // Entries in the topic update log stored as a map of catalog object keys to
   // Entry objects.
-  private final ConcurrentHashMap<String, Entry> topicLogEntries_ =
+  private final Map<String, Entry> topicLogEntries_ =
       new ConcurrentHashMap<>();
 
   /**

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Type.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Type.java b/fe/src/main/java/org/apache/impala/catalog/Type.java
index 73f15e0..c98d7aa 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Type.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Type.java
@@ -31,6 +31,7 @@ import org.apache.impala.thrift.TPrimitiveType;
 import org.apache.impala.thrift.TScalarType;
 import org.apache.impala.thrift.TStructField;
 import org.apache.impala.thrift.TTypeNode;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -71,19 +72,19 @@ public abstract class Type {
   public static final ScalarType FIXED_UDA_INTERMEDIATE =
       ScalarType.createFixedUdaIntermediateType(-1);
 
-  private static ArrayList<ScalarType> integerTypes;
-  private static ArrayList<ScalarType> numericTypes;
-  private static ArrayList<ScalarType> supportedTypes;
-  private static ArrayList<ScalarType> unsupportedTypes;
+  private static List<ScalarType> integerTypes;
+  private static List<ScalarType> numericTypes;
+  private static List<ScalarType> supportedTypes;
+  private static List<ScalarType> unsupportedTypes;
 
   static {
-    integerTypes = Lists.newArrayList();
+    integerTypes = new ArrayList<>();
     integerTypes.add(TINYINT);
     integerTypes.add(SMALLINT);
     integerTypes.add(INT);
     integerTypes.add(BIGINT);
 
-    numericTypes = Lists.newArrayList();
+    numericTypes = new ArrayList<>();
     numericTypes.add(TINYINT);
     numericTypes.add(SMALLINT);
     numericTypes.add(INT);
@@ -92,7 +93,7 @@ public abstract class Type {
     numericTypes.add(DOUBLE);
     numericTypes.add(DECIMAL);
 
-    supportedTypes = Lists.newArrayList();
+    supportedTypes = new ArrayList<>();
     supportedTypes.add(NULL);
     supportedTypes.add(BOOLEAN);
     supportedTypes.add(TINYINT);
@@ -107,22 +108,22 @@ public abstract class Type {
     supportedTypes.add(TIMESTAMP);
     supportedTypes.add(DECIMAL);
 
-    unsupportedTypes = Lists.newArrayList();
+    unsupportedTypes = new ArrayList<>();
     unsupportedTypes.add(BINARY);
     unsupportedTypes.add(DATE);
     unsupportedTypes.add(DATETIME);
   }
 
-  public static ArrayList<ScalarType> getIntegerTypes() {
+  public static List<ScalarType> getIntegerTypes() {
     return integerTypes;
   }
-  public static ArrayList<ScalarType> getNumericTypes() {
+  public static List<ScalarType> getNumericTypes() {
     return numericTypes;
   }
-  public static ArrayList<ScalarType> getSupportedTypes() {
+  public static List<ScalarType> getSupportedTypes() {
     return supportedTypes;
   }
-  public static ArrayList<ScalarType> getUnsupportedTypes() {
+  public static List<ScalarType> getUnsupportedTypes() {
     return unsupportedTypes;
   }
 
@@ -229,7 +230,7 @@ public abstract class Type {
 
   public TColumnType toThrift() {
     TColumnType container = new TColumnType();
-    container.setTypes(new ArrayList<TTypeNode>());
+    container.setTypes(new ArrayList<>());
     toThrift(container);
     return container;
   }
@@ -367,8 +368,8 @@ public abstract class Type {
     return toThrift(Lists.newArrayList(types));
   }
 
-  public static List<TColumnType> toThrift(ArrayList<Type> types) {
-    ArrayList<TColumnType> result = Lists.newArrayList();
+  public static List<TColumnType> toThrift(List<Type> types) {
+    List<TColumnType> result = new ArrayList<>();
     for (Type t: types) {
       result.add(t.toThrift());
     }
@@ -429,7 +430,7 @@ public abstract class Type {
       }
       case STRUCT: {
         Preconditions.checkState(nodeIdx + node.getStruct_fieldsSize() < col.getTypesSize());
-        ArrayList<StructField> structFields = Lists.newArrayList();
+        List<StructField> structFields = new ArrayList<>();
         ++nodeIdx;
         for (int i = 0; i < node.getStruct_fieldsSize(); ++i) {
           TStructField thriftField = node.getStruct_fields().get(i);

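[Editorial note] The Type.java signature change from toThrift(ArrayList<Type>) to toThrift(List<Type>) widens what callers may pass. A sketch of the general idea under assumed names:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class WidenParameterSketch {
      // Accepting the List interface lets callers pass an ArrayList,
      // Arrays.asList(...), an unmodifiable view, etc.; a parameter declared
      // as ArrayList would reject all but that one implementation.
      static List<String> describeAll(List<Integer> values) {
        List<String> result = new ArrayList<>();
        for (Integer v : values) {
          result.add("value=" + v);
        }
        return result;
      }

      public static void main(String[] args) {
        System.out.println(describeAll(Arrays.asList(1, 2, 3)));
      }
    }
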
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java b/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
index 0c74dfa..0bc04a2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
@@ -19,6 +19,7 @@ package org.apache.impala.catalog.local;
 
 import java.lang.management.ManagementFactory;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -41,10 +42,10 @@ import org.apache.impala.catalog.Catalog;
 import org.apache.impala.catalog.CatalogDeltaLog;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.Function;
-import org.apache.impala.catalog.Principal;
-import org.apache.impala.catalog.PrincipalPrivilege;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.ImpaladCatalog.ObjectUpdateSequencer;
+import org.apache.impala.catalog.Principal;
+import org.apache.impala.catalog.PrincipalPrivilege;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.Reference;
@@ -608,7 +609,6 @@ public class CatalogdMetaProvider implements MetaProvider {
     return ret;
   }
 
-  @SuppressWarnings("unchecked")
   @Override
   public List<PartitionRef> loadPartitionList(final TableMetaRef table)
       throws TException {
@@ -652,7 +652,7 @@ public class CatalogdMetaProvider implements MetaProvider {
     final int numMisses = partitionRefs.size() - numHits;
 
     // Load the remainder from the catalogd.
-    List<PartitionRef> missingRefs = Lists.newArrayList();
+    List<PartitionRef> missingRefs = new ArrayList<>();
     for (PartitionRef ref: partitionRefs) {
       if (!refToMeta.containsKey(ref)) missingRefs.add(ref);
     }
@@ -1049,7 +1049,7 @@ public class CatalogdMetaProvider implements MetaProvider {
    */
   @VisibleForTesting
   void invalidateCacheForObject(TCatalogObject obj) {
-    List<String> invalidated = Lists.newArrayList();
+    List<String> invalidated = new ArrayList<>();
     switch (obj.type) {
     case TABLE:
     case VIEW:

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
index edba217..8bc8996 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
@@ -19,6 +19,7 @@ package org.apache.impala.catalog.local;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -292,7 +293,7 @@ class DirectMetaProvider implements MetaProvider {
       String partName, Partition msPartition, ListMap<TNetworkAddress> hostIndex) {
     Path partDir = new Path(msPartition.getSd().getLocation());
 
-    List<LocatedFileStatus> stats = Lists.newArrayList();
+    List<LocatedFileStatus> stats = new ArrayList<>();
     try {
       FileSystem fs = partDir.getFileSystem(CONF);
       RemoteIterator<LocatedFileStatus> it = fs.listFiles(partDir, /*recursive=*/false);

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
index 96e3325..56d275a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
@@ -17,6 +17,7 @@
 
 package org.apache.impala.catalog.local;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -37,10 +38,10 @@ import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Function.CompareMode;
-import org.apache.impala.common.InternalException;
 import org.apache.impala.catalog.HdfsCachePool;
 import org.apache.impala.catalog.PartitionNotFoundException;
 import org.apache.impala.catalog.PrunablePartition;
+import org.apache.impala.common.InternalException;
 import org.apache.impala.thrift.TCatalogObject;
 import org.apache.impala.thrift.TGetPartitionStatsResponse;
 import org.apache.impala.thrift.TPartitionKeyValue;
@@ -50,7 +51,6 @@ import org.apache.thrift.TException;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
 
 /**
  * Implementation of FeCatalog which runs within the impalad and fetches metadata
@@ -69,7 +69,7 @@ import com.google.common.collect.Maps;
  */
 public class LocalCatalog implements FeCatalog {
   private final MetaProvider metaProvider_;
-  private Map<String, FeDb> dbs_ = Maps.newHashMap();
+  private Map<String, FeDb> dbs_ = new HashMap<>();
   private String nullPartitionKeyValue_;
   private final String defaultKuduMasterHosts_;
 
@@ -86,7 +86,7 @@ public class LocalCatalog implements FeCatalog {
 
   private void loadDbs() {
     if (!dbs_.isEmpty()) return;
-    Map<String, FeDb> dbs = Maps.newHashMap();
+    Map<String, FeDb> dbs = new HashMap<>();
     List<String> names;
     try {
       names = metaProvider_.loadDbList();

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
index c7f7116..20d84f5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
@@ -17,7 +17,9 @@
 
 package org.apache.impala.catalog.local;
 
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -37,13 +39,10 @@ import org.apache.impala.thrift.TFunctionCategory;
 import org.apache.impala.util.FunctionUtils;
 import org.apache.impala.util.PatternMatcher;
 import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 
@@ -54,8 +53,6 @@ import com.google.common.collect.Maps;
  * each catalog instance.
  */
 class LocalDb implements FeDb {
-  private static final Logger LOG = LoggerFactory.getLogger(LocalDb.class);
-
   private final LocalCatalog catalog_;
   /** The lower-case name of the database. */
   private final String name_;
@@ -149,7 +146,7 @@ class LocalDb implements FeDb {
    */
   private void loadTableNames() {
     if (tables_ != null) return;
-    Map<String, LocalTable> newMap = Maps.newHashMap();
+    Map<String, LocalTable> newMap = new HashMap<>();
     try {
       List<String> names = catalog_.getMetaProvider().loadTableNames(name_);
       for (String tableName : names) {
@@ -242,7 +239,7 @@ class LocalDb implements FeDb {
   public List<Function> getFunctions(
       TFunctionCategory category, PatternMatcher matcher) {
     loadFunctionNames();
-    List<Function> result = Lists.newArrayList();
+    List<Function> result = new ArrayList<>();
     Iterable<String> fnNames = Iterables.filter(functions_.keySet(), matcher);
     for (String fnName : fnNames) {
       result.addAll(getFunctions(category, fnName));

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index 5b16fbe..a1266c3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -20,6 +20,7 @@ package org.apache.impala.catalog.local;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -65,7 +66,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 public class LocalFsTable extends LocalTable implements FeFsTable {
   /**
@@ -81,7 +81,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
    *
    * Set by loadPartitionValueMap().
    */
-  private ArrayList<TreeMap<LiteralExpr, HashSet<Long>>> partitionValueMap_;
+  private List<TreeMap<LiteralExpr, Set<Long>>> partitionValueMap_;
 
   /**
    * For each partition column, the set of partition IDs having a NULL value
@@ -89,7 +89,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
    *
    * Set by loadPartitionValueMap().
    */
-  private ArrayList<HashSet<Long>> nullPartitionIds_;
+  private List<Set<Long>> nullPartitionIds_;
 
   /**
    * The value that will be stored in a partition name to indicate NULL.
@@ -277,7 +277,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
       // null means "all partitions".
       referencedPartitions = getPartitionIds();
     }
-    Map<Long, THdfsPartition> idToPartition = Maps.newHashMap();
+    Map<Long, THdfsPartition> idToPartition = new HashMap<>();
     List<? extends FeFsPartition> partitions = loadPartitions(referencedPartitions);
     for (FeFsPartition partition : partitions) {
       idToPartition.put(partition.getId(),
@@ -360,7 +360,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
   }
 
   @Override
-  public TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int col) {
+  public TreeMap<LiteralExpr, Set<Long>> getPartitionValueMap(int col) {
     loadPartitionValueMap();
     return partitionValueMap_.get(col);
   }
@@ -383,7 +383,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
     // Possible in the case that all partitions were pruned.
     if (ids.isEmpty()) return Collections.emptyList();
 
-    List<PartitionRef> refs = Lists.newArrayList();
+    List<PartitionRef> refs = new ArrayList<>();
     for (Long id : ids) {
       LocalPartitionSpec spec = partitionSpecs_.get(id);
       Preconditions.checkArgument(spec != null, "Invalid partition ID for table %s: %s",
@@ -430,13 +430,13 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
     if (partitionValueMap_ != null) return;
 
     loadPartitionSpecs();
-    ArrayList<TreeMap<LiteralExpr, HashSet<Long>>> valMapByCol =
+    List<TreeMap<LiteralExpr, Set<Long>>> valMapByCol =
         new ArrayList<>();
-    ArrayList<HashSet<Long>> nullParts = new ArrayList<>();
+    List<Set<Long>> nullParts = new ArrayList<>();
 
     for (int i = 0; i < getNumClusteringCols(); i++) {
-      valMapByCol.add(new TreeMap<LiteralExpr, HashSet<Long>>());
-      nullParts.add(new HashSet<Long>());
+      valMapByCol.add(new TreeMap<>());
+      nullParts.add(new HashSet<>());
     }
     for (LocalPartitionSpec partition : partitionSpecs_.values()) {
       List<LiteralExpr> vals = partition.getPartitionValues();
@@ -447,7 +447,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
           continue;
         }
 
-        HashSet<Long> ids = valMapByCol.get(i).get(val);
+        Set<Long> ids = valMapByCol.get(i).get(val);
         if (ids == null) {
           ids = new HashSet<>();
           valMapByCol.get(i).put(val,  ids);

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
index 8480500..ca88a9f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
@@ -17,6 +17,10 @@
 
 package org.apache.impala.catalog.local;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -30,10 +34,6 @@ import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.thrift.TTableDescriptor;
 import org.apache.impala.thrift.TTableType;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Set;
-
 public class LocalHbaseTable extends LocalTable implements FeHBaseTable {
   // Name of table in HBase.
   // 'this.name' is the alias of the HBase table in Hive.
@@ -102,7 +102,7 @@ public class LocalHbaseTable extends LocalTable implements FeHBaseTable {
   }
 
   @Override
-  public ArrayList<Column> getColumnsInHiveOrder() {
+  public List<Column> getColumnsInHiveOrder() {
     return getColumns();
   }
 

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
index fc48ca1..3ba2db3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
@@ -176,7 +176,7 @@ public class LocalKuduTable extends LocalTable implements FeKuduTable {
     Preconditions.checkNotNull(partitionBy_);
     // IMPALA-5154: partitionBy_ may be empty if Kudu table created outside Impala,
     // partition_by must be explicitly created because the field is required.
-    tbl.partition_by = Lists.newArrayList();
+    tbl.partition_by = new ArrayList<>();
     for (KuduPartitionParam partitionParam: partitionBy_) {
       tbl.addToPartition_by(partitionParam.toThrift());
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
index 81a0741..c544890 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
@@ -185,14 +185,14 @@ abstract class LocalTable implements FeTable {
   }
 
   @Override
-  public ArrayList<Column> getColumns() {
+  public List<Column> getColumns() {
     // TODO(todd) why does this return ArrayList instead of List?
     return new ArrayList<>(cols_.colsByPos_);
   }
 
   @Override
   public List<Column> getColumnsInHiveOrder() {
-    ArrayList<Column> columns = Lists.newArrayList(getNonClusteringColumns());
+    List<Column> columns = Lists.newArrayList(getNonClusteringColumns());
     columns.addAll(getClusteringColumns());
     return columns;
   }
@@ -327,7 +327,7 @@ abstract class LocalTable implements FeTable {
 
 
     private static StructType columnsToStructType(List<Column> cols) {
-      ArrayList<StructField> fields = Lists.newArrayListWithCapacity(cols.size());
+      List<StructField> fields = Lists.newArrayListWithCapacity(cols.size());
       for (Column col : cols) {
         fields.add(new StructField(col.getName(), col.getType(), col.getComment()));
       }

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index fad5cf0..9fb204a 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -252,7 +252,7 @@ public class HdfsPartitionPruner {
     // Get the partition column position and retrieve the associated partition
     // value metadata.
     int partitionPos = slot.getDesc().getColumn().getPosition();
-    TreeMap<LiteralExpr, HashSet<Long>> partitionValueMap =
+    TreeMap<LiteralExpr, Set<Long>> partitionValueMap =
         tbl_.getPartitionValueMap(partitionPos);
     if (partitionValueMap.isEmpty()) return new HashSet<>();
 
@@ -297,7 +297,7 @@ public class HdfsPartitionPruner {
     }
 
     // Determine the partition key value range of this predicate.
-    NavigableMap<LiteralExpr, HashSet<Long>> rangeValueMap = null;
+    NavigableMap<LiteralExpr, Set<Long>> rangeValueMap = null;
     LiteralExpr firstKey = partitionValueMap.firstKey();
     LiteralExpr lastKey = partitionValueMap.lastKey();
     boolean upperInclusive = false;
@@ -357,7 +357,7 @@ public class HdfsPartitionPruner {
     SlotRef slot = inPredicate.getBoundSlot();
     Preconditions.checkNotNull(slot);
     int partitionPos = slot.getDesc().getColumn().getPosition();
-    TreeMap<LiteralExpr, HashSet<Long>> partitionValueMap =
+    TreeMap<LiteralExpr, Set<Long>> partitionValueMap =
         tbl_.getPartitionValueMap(partitionPos);
 
     if (inPredicate.isNotIn()) {

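[Editorial note] The HdfsPartitionPruner changes follow directly from the LocalFsTable change: because Java generics are invariant, a TreeMap<LiteralExpr, HashSet<Long>> is not a TreeMap<LiteralExpr, Set<Long>>, so every declaration along the call chain has to move to Set<Long> together. A minimal sketch of that invariance (types below are placeholders):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeMap;

    public class InvarianceSketch {
      public static void main(String[] args) {
        TreeMap<String, Set<Long>> byValue = new TreeMap<>();

        // Fine: a HashSet instance can be stored where a Set value is expected.
        byValue.put("a", new HashSet<>());

        // Would NOT compile: generic type arguments are invariant, so a
        // TreeMap<String, HashSet<Long>> cannot be assigned to this variable.
        // TreeMap<String, Set<Long>> other = new TreeMap<String, HashSet<Long>>();

        System.out.println(byValue);
      }
    }
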
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index 114e289..0747464 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -19,6 +19,7 @@ package org.apache.impala.catalog;
 
 import static org.junit.Assert.fail;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Map;
 
@@ -228,7 +229,7 @@ public class CatalogObjectToFromThriftTest {
       new HdfsPartition(hdfsTable, part.toHmsPartition(),
         Lists.newArrayList(LiteralExpr.create("11.1", ScalarType.createDecimalType(1, 0)),
             LiteralExpr.create("11.1", ScalarType.createDecimalType(1, 0))),
-        null, Lists.<HdfsPartition.FileDescriptor>newArrayList(),
+        null, new ArrayList<>(),
         TAccessLevel.READ_WRITE);
       fail("Expected metadata to be malformed.");
     } catch (SqlCastException e) {

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
index 2226dc0..0946b38 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
@@ -70,7 +70,6 @@ import org.junit.Test;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class CatalogTest {
@@ -430,7 +429,7 @@ public class CatalogTest {
     assertEquals(24, partitions.size());
     Set<HdfsStorageDescriptor> uniqueSds = Collections.newSetFromMap(
         new IdentityHashMap<HdfsStorageDescriptor, Boolean>());
-    Set<Long> months = Sets.newHashSet();
+    Set<Long> months = new HashSet<>();
     for (FeFsPartition p: partitions) {
       assertEquals(2, p.getPartitionValues().size());
 
@@ -740,7 +739,7 @@ public class CatalogTest {
 
   private List<String> getFunctionSignatures(String db) throws DatabaseNotFoundException {
     List<Function> fns = catalog_.getFunctions(db);
-    List<String> names = Lists.newArrayList();
+    List<String> names = new ArrayList<>();
     for (Function fn: fns) {
       names.add(fn.signatureString());
     }
@@ -752,9 +751,9 @@ public class CatalogTest {
     List<String> fnNames = getFunctionSignatures("default");
     assertEquals(fnNames.size(), 0);
 
-    ArrayList<Type> args1 = Lists.newArrayList();
-    ArrayList<Type> args2 = Lists.<Type>newArrayList(Type.INT);
-    ArrayList<Type> args3 = Lists.<Type>newArrayList(Type.TINYINT);
+    List<Type> args1 = new ArrayList<>();
+    List<Type> args2 = Lists.<Type>newArrayList(Type.INT);
+    List<Type> args3 = Lists.<Type>newArrayList(Type.TINYINT);
 
     catalog_.removeFunction(
         new Function(new FunctionName("default", "Foo"), args1,
@@ -837,7 +836,7 @@ public class CatalogTest {
     assertEquals(fnNames.size(), 0);
 
     // Test to check if catalog can handle loading corrupt udfs
-    HashMap<String, String> dbParams = Maps.newHashMap();
+    Map<String, String> dbParams = new HashMap<>();
     String badFnKey = "impala_registered_function_badFn";
     String badFnVal = Base64.encodeBase64String("badFn".getBytes());
     String dbName = "corrupt_udf_test";
@@ -899,7 +898,7 @@ public class CatalogTest {
     assertNull(catalog_.getAuthPolicy().getPrincipal("role1", TPrincipalType.USER));
     assertNull(catalog_.getAuthPolicy().getPrincipal("role2", TPrincipalType.ROLE));
     // Add the same role, the old role will be deleted.
-    role = catalog_.addRole("role1", new HashSet<String>());
+    role = catalog_.addRole("role1", new HashSet<>());
     assertSame(role, authPolicy.getPrincipal("role1", TPrincipalType.ROLE));
     // Delete the role.
     assertSame(role, catalog_.removeRole("role1"));
@@ -912,7 +911,7 @@ public class CatalogTest {
     for (int i = 0; i < size; i++) {
       String name = prefix + i;
       catalog_.addUser(name);
-      catalog_.addRole(name, new HashSet<String>());
+      catalog_.addRole(name, new HashSet<>());
     }
 
     for (int i = 0; i < size; i++) {

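[Editorial note] Also visible in CatalogTest: the Lists.newArrayList(...) calls that seed the list with elements (args2, args3) are kept, since the diamond operator only replaces the empty-factory case; building a mutable, pre-populated ArrayList without Guava is more verbose. A hedged sketch of the trade-off:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import com.google.common.collect.Lists;

    public class SeededListSketch {
      public static void main(String[] args) {
        // Kept in the commit: Guava's factory is still the tersest way to get
        // a mutable ArrayList with initial elements.
        List<Integer> withGuava = Lists.newArrayList(1, 2);

        // Plain-JDK equivalent, noticeably clumsier for the same result.
        List<Integer> withoutGuava = new ArrayList<>(Arrays.asList(1, 2));

        System.out.println(withGuava.equals(withoutGuava));  // true
      }
    }
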
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
index f40897d..72d0cde 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
@@ -18,7 +18,8 @@
 package org.apache.impala.catalog;
 
 import static org.apache.impala.catalog.HdfsPartition.comparePartitionKeyValues;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -48,13 +49,13 @@ public class HdfsPartitionTest {
     FeSupport.loadLibrary();
   }
 
-  private List<LiteralExpr> valuesNull_= Lists.newArrayList();
-  private List<LiteralExpr> valuesDecimal_ = Lists.newArrayList();
-  private List<LiteralExpr> valuesDecimal1_ = Lists.newArrayList();
-  private List<LiteralExpr> valuesDecimal2_ = Lists.newArrayList();
-  private List<LiteralExpr> valuesMixed_= Lists.newArrayList();
-  private List<LiteralExpr> valuesMixed1_ = Lists.newArrayList();
-  private List<LiteralExpr> valuesMixed2_ = Lists.newArrayList();
+  private List<LiteralExpr> valuesNull_= new ArrayList<>();
+  private List<LiteralExpr> valuesDecimal_ = new ArrayList<>();
+  private List<LiteralExpr> valuesDecimal1_ = new ArrayList<>();
+  private List<LiteralExpr> valuesDecimal2_ = new ArrayList<>();
+  private List<LiteralExpr> valuesMixed_= new ArrayList<>();
+  private List<LiteralExpr> valuesMixed1_ = new ArrayList<>();
+  private List<LiteralExpr> valuesMixed2_ = new ArrayList<>();
 
   public HdfsPartitionTest() {
     valuesNull_.add(NullLiteral.create(Type.BIGINT));
@@ -77,7 +78,7 @@ public class HdfsPartitionTest {
 
   @Test
   public void testCompare() {
-    List<List<LiteralExpr>> allLists = Lists.newArrayList();
+    List<List<LiteralExpr>> allLists = new ArrayList<>();
     allLists.add(valuesNull_);
     allLists.add(valuesDecimal_);
     allLists.add(valuesDecimal1_);
@@ -96,7 +97,7 @@ public class HdfsPartitionTest {
       }
     }
 
-    List<LiteralExpr> valuesTest = Lists.newArrayList();
+    List<LiteralExpr> valuesTest = new ArrayList<>();
     valuesTest.add(NumericLiteral.create(3));
     verifyAntiSymmetric(valuesDecimal1_, valuesTest, valuesNull_);
     valuesTest.add(NullLiteral.create(Type.BIGINT));

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
index d814497..4abf782 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
@@ -27,12 +27,12 @@ import java.util.List;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
 import org.apache.impala.catalog.HdfsStorageDescriptor.InvalidStorageDescriptorException;
 import org.apache.impala.service.FeSupport;
 import org.apache.impala.thrift.THdfsFileFormat;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
 import com.google.common.collect.ImmutableList;
 
 public class HdfsStorageDescriptorTest {
@@ -96,32 +96,32 @@ public class HdfsStorageDescriptorTest {
   public void testDelimiters() throws InvalidStorageDescriptorException {
     StorageDescriptor sd = HiveStorageDescriptorFactory.createSd(THdfsFileFormat.TEXT,
         RowFormat.DEFAULT_ROW_FORMAT);
-    sd.setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.setParameters(new HashMap<>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "-2");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "-128");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "127");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.LINE_DELIM, "\001");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "|");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "\t");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "ab");
     try {
       HdfsStorageDescriptor.fromStorageDescriptor("fake", sd);
@@ -132,7 +132,7 @@ public class HdfsStorageDescriptorTest {
           e.getMessage());
     }
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "128");
     try {
       HdfsStorageDescriptor.fromStorageDescriptor("fake", sd);
@@ -143,7 +143,7 @@ public class HdfsStorageDescriptorTest {
           e.getMessage());
     }
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "\128");
     try {
       HdfsStorageDescriptor.fromStorageDescriptor("fake", sd);
@@ -154,7 +154,7 @@ public class HdfsStorageDescriptorTest {
           e.getMessage());
     }
 
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.LINE_DELIM, "-129");
     try {
       HdfsStorageDescriptor.fromStorageDescriptor("fake", sd);
@@ -166,7 +166,7 @@ public class HdfsStorageDescriptorTest {
     }
 
     // Test that a unicode character out of the valid range will not be accepted.
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.LINE_DELIM, "\u1111");
     try {
       HdfsStorageDescriptor.fromStorageDescriptor("fake", sd);
@@ -178,7 +178,7 @@ public class HdfsStorageDescriptorTest {
     }
 
     // Validate that unicode character in the valid range will be accepted.
-    sd.getSerdeInfo().setParameters(new HashMap<String,String>());
+    sd.getSerdeInfo().setParameters(new HashMap<>());
     sd.getSerdeInfo().putToParameters(serdeConstants.FIELD_DELIM, "\u0001");
     assertNotNull(HdfsStorageDescriptor.fromStorageDescriptor("fakeTbl", sd));
 

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
index 6241523..1b5e288 100644
--- a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
@@ -30,8 +31,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import avro.shaded.com.google.common.collect.Lists;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.service.BackendConfig;
@@ -52,6 +51,7 @@ import org.apache.thrift.TException;
 import org.apache.thrift.TSerializer;
 import org.junit.Test;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 
 public class PartialCatalogInfoTest {
@@ -97,7 +97,7 @@ public class PartialCatalogInfoTest {
     Preconditions.checkState(requestCount > 0);
     final ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(requestCount);
     final List<Future<TGetPartialCatalogObjectResponse>> tasksToWaitFor =
-        Lists.newArrayList();
+        new ArrayList<>();
     for (int i = 0; i < requestCount; ++i) {
       tasksToWaitFor.add(threadPoolExecutor.submit(new
           CallableGetPartialCatalogObjectRequest(request)));

http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java b/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
index 43ac610..171652d 100644
--- a/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
+++ b/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
@@ -23,8 +23,7 @@ import java.util.Map;
 public class TestSchemaUtils {
   // maps from PrimitiveType to column name
   // in alltypes table
-  private static Map<Type, String> typeToColumnNameMap_ =
-      new HashMap<Type, String>();
+  private static Map<Type, String> typeToColumnNameMap_ = new HashMap<>();
   static {
     typeToColumnNameMap_.put(Type.BOOLEAN, "bool_col");
     typeToColumnNameMap_.put(Type.TINYINT, "tinyint_col");