Posted to commits@impala.apache.org by jo...@apache.org on 2019/01/10 21:19:55 UTC
[03/10] impala git commit: IMPALA-7867 (Part 4): Collection cleanup in catalog
IMPALA-7867 (Part 4): Collection cleanup in catalog
Continues the collection cleanup work to:
* Use collection interfaces for variable and function argument declarations,
* Replace generic Guava newArrayList(), etc. calls with the direct use of the Java collection classes,
* Clean up unused imports and add override annotations (a sketch of these patterns follows this list).
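As a minimal, hypothetical sketch of the three patterns above (the FnRegistry class and its members are invented for illustration and do not appear in the Impala codebase):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical example; not part of the Impala sources.
public class FnRegistry {
  // Declare against the interface; pick the implementation only at the
  // new-site, using the diamond operator instead of Maps.newHashMap().
  private final Map<String, List<String>> fns_ = new HashMap<>();

  // Accept the List interface rather than a concrete ArrayList.
  public void register(String name, List<String> overloads) {
    // Instead of Lists.newArrayList(overloads):
    List<String> copy = new ArrayList<>(overloads);
    fns_.put(name, copy);
  }

  // @Override asks the compiler to verify this really overrides a
  // supertype method (here, Object.toString()).
  @Override
  public String toString() { return "FnRegistry" + fns_.keySet(); }
}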
This patch focuses on the catalog module and its tests.
Tests: this is purely a code change; there is no functional change. Reran
existing tests.
Change-Id: Ic83425201c90966aae4c280d94cf1b427b3d71d1
Reviewed-on: http://gerrit.cloudera.org:8080/12131
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/049e1056
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/049e1056
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/049e1056
Branch: refs/heads/master
Commit: 049e1056f8d772c55274a54ff99e2cf44b82fa56
Parents: 43058cb
Author: paul-rogers <pr...@cloudera.com>
Authored: Wed Dec 26 22:56:08 2018 -0800
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Wed Jan 9 23:17:25 2019 +0000
----------------------------------------------------------------------
.../impala/catalog/AggregateFunction.java | 4 +-
.../impala/catalog/AuthorizationPolicy.java | 28 +++----
.../org/apache/impala/catalog/BuiltinsDb.java | 2 +-
.../java/org/apache/impala/catalog/Catalog.java | 14 ++--
.../impala/catalog/CatalogObjectCache.java | 4 +-
.../impala/catalog/CatalogServiceCatalog.java | 32 ++++----
.../java/org/apache/impala/catalog/Column.java | 12 +--
.../main/java/org/apache/impala/catalog/Db.java | 32 ++++----
.../org/apache/impala/catalog/DiskIdMapper.java | 14 ++--
.../apache/impala/catalog/FeCatalogUtils.java | 22 ++---
.../org/apache/impala/catalog/FeFsTable.java | 19 +++--
.../org/apache/impala/catalog/FeHBaseTable.java | 16 ++--
.../org/apache/impala/catalog/FeKuduTable.java | 8 +-
.../java/org/apache/impala/catalog/FeTable.java | 3 +-
.../org/apache/impala/catalog/Function.java | 4 +-
.../org/apache/impala/catalog/HBaseTable.java | 14 ++--
.../apache/impala/catalog/HdfsFileFormat.java | 5 +-
.../apache/impala/catalog/HdfsPartition.java | 20 +++--
.../impala/catalog/HdfsStorageDescriptor.java | 4 +-
.../org/apache/impala/catalog/HdfsTable.java | 84 ++++++++++----------
.../catalog/HiveStorageDescriptorFactory.java | 2 +-
.../catalog/ImpaladTableUsageTracker.java | 16 ++--
.../org/apache/impala/catalog/KuduTable.java | 8 +-
.../impala/catalog/PartitionStatsUtil.java | 24 +++---
.../apache/impala/catalog/PrimitiveType.java | 4 +-
.../apache/impala/catalog/ScalarFunction.java | 11 ++-
.../org/apache/impala/catalog/StructType.java | 16 ++--
.../java/org/apache/impala/catalog/Table.java | 17 ++--
.../apache/impala/catalog/TableLoadingMgr.java | 12 +--
.../apache/impala/catalog/TopicUpdateLog.java | 5 +-
.../java/org/apache/impala/catalog/Type.java | 33 ++++----
.../catalog/local/CatalogdMetaProvider.java | 10 +--
.../catalog/local/DirectMetaProvider.java | 3 +-
.../impala/catalog/local/LocalCatalog.java | 8 +-
.../apache/impala/catalog/local/LocalDb.java | 11 +--
.../impala/catalog/local/LocalFsTable.java | 22 ++---
.../impala/catalog/local/LocalHbaseTable.java | 10 +--
.../impala/catalog/local/LocalKuduTable.java | 2 +-
.../apache/impala/catalog/local/LocalTable.java | 6 +-
.../impala/planner/HdfsPartitionPruner.java | 6 +-
.../catalog/CatalogObjectToFromThriftTest.java | 3 +-
.../org/apache/impala/catalog/CatalogTest.java | 17 ++--
.../impala/catalog/HdfsPartitionTest.java | 21 ++---
.../catalog/HdfsStorageDescriptorTest.java | 32 ++++----
.../impala/catalog/PartialCatalogInfoTest.java | 6 +-
.../apache/impala/catalog/TestSchemaUtils.java | 3 +-
46 files changed, 328 insertions(+), 321 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java b/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
index 7228efd..70d45b5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
@@ -17,6 +17,7 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.List;
import org.apache.impala.analysis.Expr;
@@ -28,7 +29,6 @@ import org.apache.impala.thrift.TFunction;
import org.apache.impala.thrift.TFunctionBinaryType;
import org.apache.impala.thrift.TSymbolLookupParams;
import org.apache.impala.thrift.TSymbolType;
-import org.apache.kudu.shaded.com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
@@ -209,7 +209,7 @@ public class AggregateFunction extends Function {
*/
public static List<Expr> getCanonicalDistinctAggChildren(FunctionCallExpr aggFn) {
Preconditions.checkState(aggFn.isDistinct());
- List<Expr> result = Lists.newArrayList();
+ List<Expr> result = new ArrayList<>();
if (aggFn.getFnName().getFunction().equalsIgnoreCase("group_concat")) {
result.add(aggFn.getChild(0).ignoreImplicitCast());
} else {
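A side note on the hunk above: the import being removed pointed at the Guava copy shaded inside the Kudu client jar (org.apache.kudu.shaded...), presumably an IDE auto-import slip, rather than at Impala's own Guava dependency. Reading the diff, switching to the JDK class removes the need for any such import; a compilable micro-example of the before/after:

import java.util.ArrayList;
import java.util.List;

class ShadedImportExample {
  // Before (removed by this patch): a shaded third-party Guava
  //   import org.apache.kudu.shaded.com.google.common.collect.Lists;
  //   List<String> result = Lists.newArrayList();
  // After: no third-party import at all.
  static List<String> result() {
    return new ArrayList<>();
  }
}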
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
index 4934505..5e9b11d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
@@ -17,11 +17,13 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import com.google.common.base.Preconditions;
import org.apache.commons.net.ntp.TimeStamp;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.common.InternalException;
@@ -30,17 +32,15 @@ import org.apache.impala.thrift.TColumn;
import org.apache.impala.thrift.TPrincipal;
import org.apache.impala.thrift.TPrincipalType;
import org.apache.impala.thrift.TPrivilege;
-import org.apache.impala.thrift.TResultRow;
import org.apache.impala.thrift.TResultSet;
import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.util.TResultRowBuilder;
import org.apache.log4j.Logger;
import org.apache.sentry.core.common.ActiveRoleSet;
import org.apache.sentry.provider.cache.PrivilegeCache;
-import org.apache.impala.util.TResultRowBuilder;
+import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
@@ -80,12 +80,12 @@ public class AuthorizationPolicy implements PrivilegeCache {
private final CatalogObjectCache<User> userCache_ = new CatalogObjectCache<>(false);
// Map of principal ID -> user/role name. Used to match privileges to users/roles.
- private final Map<Integer, String> principalIds_ = Maps.newHashMap();
+ private final Map<Integer, String> principalIds_ = new HashMap<>();
// Map of group name (case sensitive) to set of role names (case insensitive) that
// have been granted to this group. Kept in sync with roleCache_. Provides efficient
// lookups of Role by group name.
- Map<String, Set<String>> groupsToRoles_ = Maps.newHashMap();
+ Map<String, Set<String>> groupsToRoles_ = new HashMap<>();
/**
* Adds a new principal to the policy. If a principal with the same name already
@@ -125,7 +125,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
for (String groupName: principal.getGrantGroups()) {
Set<String> grantedRoles = groupsToRoles_.get(groupName);
if (grantedRoles == null) {
- grantedRoles = Sets.newHashSet();
+ grantedRoles = new HashSet<>();
groupsToRoles_.put(groupName, grantedRoles);
}
grantedRoles.add(principal.getName().toLowerCase());
@@ -268,7 +268,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
* Gets all roles granted to the specified group.
*/
public synchronized List<Role> getGrantedRoles(String groupName) {
- List<Role> grantedRoles = Lists.newArrayList();
+ List<Role> grantedRoles = new ArrayList<>();
Set<String> roleNames = groupsToRoles_.get(groupName);
if (roleNames != null) {
for (String roleName: roleNames) {
@@ -379,7 +379,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
role.addGrantGroup(groupName);
Set<String> grantedRoles = groupsToRoles_.get(groupName);
if (grantedRoles == null) {
- grantedRoles = Sets.newHashSet();
+ grantedRoles = new HashSet<>();
groupsToRoles_.put(groupName, grantedRoles);
}
grantedRoles.add(roleName.toLowerCase());
@@ -409,7 +409,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
@Override
public synchronized Set<String> listPrivileges(Set<String> groups,
ActiveRoleSet roleSet) {
- Set<String> privileges = Sets.newHashSet();
+ Set<String> privileges = new HashSet<>();
if (roleSet != ActiveRoleSet.ALL) {
throw new UnsupportedOperationException("Impala does not support role subsets.");
}
@@ -473,7 +473,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
TResultSet result = new TResultSet();
result.setSchema(new TResultSetMetadata());
addColumnOutputColumns(result.getSchema());
- result.setRows(Lists.<TResultRow>newArrayList());
+ result.setRows(new ArrayList<>());
Role role = getRole(principalName);
if (role != null) {
@@ -550,7 +550,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
result.getSchema().addToColumns(new TColumn("principal_name",
Type.STRING.toThrift()));
addColumnOutputColumns(result.getSchema());
- result.setRows(Lists.<TResultRow>newArrayList());
+ result.setRows(new ArrayList<>());
// A user should be considered to not exist if they do not have any groups.
Set<String> groupNames = fe.getAuthzChecker().getUserGroups(
@@ -567,7 +567,7 @@ public class AuthorizationPolicy implements PrivilegeCache {
// Get the groups that user belongs to, get the roles those groups belong to and
// return those privileges as well.
- List<Role> roles = Lists.newArrayList();
+ List<Role> roles = new ArrayList<>();
for (String groupName: groupNames) {
roles.addAll(fe.getCatalog().getAuthPolicy().getGrantedRoles(groupName));
}
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java b/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
index 5641fc3..8f831a8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
@@ -804,7 +804,7 @@ public class BuiltinsDb extends Db {
Db db = this;
// Count (*)
db.addBuiltin(AggregateFunction.createBuiltin(db, "count",
- new ArrayList<Type>(), Type.BIGINT, Type.BIGINT,
+ new ArrayList<>(), Type.BIGINT, Type.BIGINT,
prefix + "8InitZeroIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPT_",
prefix + "15CountStarUpdateEPN10impala_udf15FunctionContextEPNS1_9BigIntValE",
prefix + "10CountMergeEPN10impala_udf15FunctionContextERKNS1_9BigIntValEPS4_",
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Catalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Catalog.java b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
index 2a8f5a0..1f46882 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
@@ -17,9 +17,11 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
@@ -36,7 +38,6 @@ import org.apache.impala.util.PatternMatcher;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
/**
* Thread safe interface for reading and updating metadata stored in the Hive MetaStore.
@@ -72,9 +73,8 @@ public abstract class Catalog {
// Thread safe cache of database metadata. Uses an AtomicReference so reset()
// operations can atomically swap dbCache_ references.
// TODO: Update this to use a CatalogObjectCache?
- protected AtomicReference<ConcurrentHashMap<String, Db>> dbCache_ =
- new AtomicReference<ConcurrentHashMap<String, Db>>(
- new ConcurrentHashMap<String, Db>());
+ protected AtomicReference<Map<String, Db>> dbCache_ =
+ new AtomicReference<>(new ConcurrentHashMap<String, Db>());
// Cache of data sources.
protected final CatalogObjectCache<DataSource> dataSources_;
@@ -338,7 +338,7 @@ public abstract class Catalog {
public static List<String> filterStringsByPattern(Iterable<String> candidates,
PatternMatcher matcher) {
Preconditions.checkNotNull(matcher);
- List<String> filtered = Lists.newArrayList();
+ List<String> filtered = new ArrayList<>();
for (String candidate: candidates) {
if (matcher.matches(candidate)) filtered.add(candidate);
}
@@ -363,7 +363,7 @@ public abstract class Catalog {
public static <T extends HasName> List<T> filterCatalogObjectsByPattern(
Iterable<? extends T> candidates, PatternMatcher matcher) {
Preconditions.checkNotNull(matcher);
- List<T> filtered = Lists.newArrayList();
+ List<T> filtered = new ArrayList<>();
for (T candidate: candidates) {
if (matcher.matches(candidate.getName())) filtered.add(candidate);
}
@@ -373,7 +373,7 @@ public abstract class Catalog {
public HdfsPartition getHdfsPartition(String dbName, String tableName,
org.apache.hadoop.hive.metastore.api.Partition msPart) throws CatalogException {
- List<TPartitionKeyValue> partitionSpec = Lists.newArrayList();
+ List<TPartitionKeyValue> partitionSpec = new ArrayList<>();
Table table = getTable(dbName, tableName);
if (!(table instanceof HdfsTable)) {
throw new PartitionNotFoundException(
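The dbCache_ hunk above narrows the declared type to Map while keeping the ConcurrentHashMap instantiation, so reset() can still swap the whole cache atomically. A self-contained sketch of that pattern, with illustrative names (DbCacheSketch is not from the codebase):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

class DbCacheSketch {
  // The field is declared against Map; the concrete type shows up only at
  // the new-site. The explicit <String, String> on the inner constructor
  // mirrors the committed code and avoids relying on nested diamond
  // inference.
  private final AtomicReference<Map<String, String>> dbCache_ =
      new AtomicReference<>(new ConcurrentHashMap<String, String>());

  // A full, atomic swap of the cache contents.
  void reset(Map<String, String> freshlyLoaded) {
    dbCache_.set(new ConcurrentHashMap<>(freshlyLoaded));
  }

  String lookup(String dbName) { return dbCache_.get().get(dbName); }
}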
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java b/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
index e92c070..d613cf9 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
@@ -19,6 +19,7 @@ package org.apache.impala.catalog;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -55,8 +56,7 @@ public class CatalogObjectCache<T extends CatalogObject> implements Iterable<T>
// new entries may require two cache accesses that must be performed atomically.
// TODO: For simplicity, consider using a (non-concurrent) HashMap and marking
// all methods as synchronized.
- private final ConcurrentHashMap<String, T> metadataCache_ =
- new ConcurrentHashMap<String, T>();
+ private final Map<String, T> metadataCache_ = new ConcurrentHashMap<String, T>();
/**
* Adds a new catalogObject to the cache. If a catalogObject with the same name already
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index 8b4cd8e..d9aa2a9 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -62,8 +61,8 @@ import org.apache.impala.thrift.TFunction;
import org.apache.impala.thrift.TGetCatalogUsageResponse;
import org.apache.impala.thrift.TGetPartialCatalogObjectRequest;
import org.apache.impala.thrift.TGetPartialCatalogObjectResponse;
-import org.apache.impala.thrift.TPartialCatalogInfo;
import org.apache.impala.thrift.TGetPartitionStatsRequest;
+import org.apache.impala.thrift.TPartialCatalogInfo;
import org.apache.impala.thrift.TPartitionKeyValue;
import org.apache.impala.thrift.TPartitionStats;
import org.apache.impala.thrift.TPrincipalType;
@@ -73,8 +72,8 @@ import org.apache.impala.thrift.TTableName;
import org.apache.impala.thrift.TTableUsage;
import org.apache.impala.thrift.TTableUsageMetrics;
import org.apache.impala.thrift.TUniqueId;
-import org.apache.impala.util.FunctionUtils;
import org.apache.impala.thrift.TUpdateTableUsageRequest;
+import org.apache.impala.util.FunctionUtils;
import org.apache.impala.util.PatternMatcher;
import org.apache.impala.util.SentryProxy;
import org.apache.log4j.Logger;
@@ -83,10 +82,10 @@ import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;
import com.codahale.metrics.Timer;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -339,12 +338,13 @@ public class CatalogServiceCatalog extends Catalog {
incrementVersions_ = incrementVersions;
}
+ @Override
public void run() {
if (LOG.isTraceEnabled()) LOG.trace("Reloading cache pool names from HDFS");
// Map of cache pool name to CachePoolInfo. Stored in a map to allow Set operations
// to be performed on the keys.
- Map<String, CachePoolInfo> currentCachePools = Maps.newHashMap();
+ Map<String, CachePoolInfo> currentCachePools = new HashMap<>();
try {
DistributedFileSystem dfs = FileSystemUtil.getDistributedFileSystem();
RemoteIterator<CachePoolEntry> itr = dfs.listCachePools();
@@ -462,7 +462,7 @@ public class CatalogServiceCatalog extends Catalog {
// Table must be loaded.
Preconditions.checkState(table.isLoaded());
- Map<String, ByteBuffer> stats = Maps.newHashMap();
+ Map<String, ByteBuffer> stats = new HashMap<>();
HdfsTable hdfsTable = (HdfsTable) table;
hdfsTable.getLock().lock();
try {
@@ -938,8 +938,8 @@ public class CatalogServiceCatalog extends Catalog {
}
// Contains map of overloaded function names to all functions matching that name.
- HashMap<String, List<Function>> dbFns = db.getAllFunctions();
- List<Function> fns = new ArrayList<Function>(dbFns.size());
+ Map<String, List<Function>> dbFns = db.getAllFunctions();
+ List<Function> fns = new ArrayList<>(dbFns.size());
for (List<Function> fnOverloads: dbFns.values()) {
for (Function fn: fnOverloads) {
fns.add(fn);
@@ -1006,7 +1006,7 @@ public class CatalogServiceCatalog extends Catalog {
Db tmpDb;
try {
List<org.apache.hadoop.hive.metastore.api.Function> javaFns =
- Lists.newArrayList();
+ new ArrayList<>();
for (String javaFn : msClient.getHiveClient().getFunctions(dbName, "*")) {
javaFns.add(msClient.getHiveClient().getFunction(dbName, javaFn));
}
@@ -1068,7 +1068,7 @@ public class CatalogServiceCatalog extends Catalog {
MetaStoreClient msClient, String dbName, Db existingDb) {
try {
List<org.apache.hadoop.hive.metastore.api.Function> javaFns =
- Lists.newArrayList();
+ new ArrayList<>();
for (String javaFn: msClient.getHiveClient().getFunctions(dbName, "*")) {
javaFns.add(msClient.getHiveClient().getFunction(dbName, javaFn));
}
@@ -1091,7 +1091,7 @@ public class CatalogServiceCatalog extends Catalog {
loadJavaFunctions(newDb, javaFns);
newDb.setCatalogVersion(incrementAndGetCatalogVersion());
- List<TTableName> tblsToBackgroundLoad = Lists.newArrayList();
+ List<TTableName> tblsToBackgroundLoad = new ArrayList<>();
for (String tableName: msClient.getHiveClient().getAllTables(dbName)) {
Table incompleteTbl = IncompleteTable.createUninitializedTable(newDb, tableName);
incompleteTbl.setCatalogVersion(incrementAndGetCatalogVersion());
@@ -1184,8 +1184,8 @@ public class CatalogServiceCatalog extends Catalog {
// Build a new DB cache, populate it, and replace the existing cache in one
// step.
- ConcurrentHashMap<String, Db> newDbCache = new ConcurrentHashMap<String, Db>();
- List<TTableName> tblsToBackgroundLoad = Lists.newArrayList();
+ Map<String, Db> newDbCache = new ConcurrentHashMap<String, Db>();
+ List<TTableName> tblsToBackgroundLoad = new ArrayList<>();
try (MetaStoreClient msClient = getMetaStoreClient()) {
for (String dbName: msClient.getHiveClient().getAllDatabases()) {
dbName = dbName.toLowerCase();
@@ -1664,7 +1664,7 @@ public class CatalogServiceCatalog extends Catalog {
* If a user with the same name already exists it will be overwritten.
*/
public User addUser(String userName) {
- Principal user = addPrincipal(userName, Sets.<String>newHashSet(),
+ Principal user = addPrincipal(userName, new HashSet<>(),
TPrincipalType.USER);
Preconditions.checkState(user instanceof User);
return (User) user;
@@ -2073,8 +2073,8 @@ public class CatalogServiceCatalog extends Catalog {
*/
public TGetCatalogUsageResponse getCatalogUsage() {
TGetCatalogUsageResponse usage = new TGetCatalogUsageResponse();
- usage.setLarge_tables(Lists.<TTableUsageMetrics>newArrayList());
- usage.setFrequently_accessed_tables(Lists.<TTableUsageMetrics>newArrayList());
+ usage.setLarge_tables(new ArrayList<>());
+ usage.setFrequently_accessed_tables(new ArrayList<>());
for (Table largeTable: CatalogUsageMonitor.INSTANCE.getLargestTables()) {
TTableUsageMetrics tableUsageMetrics =
new TTableUsageMetrics(largeTable.getTableName().toThrift());
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Column.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java b/fe/src/main/java/org/apache/impala/catalog/Column.java
index bec5852..8acbe79 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Column.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Column.java
@@ -22,18 +22,17 @@ import java.util.List;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.impala.thrift.TColumn;
-import org.apache.impala.thrift.TColumnStats;
import com.google.common.base.Function;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
-import org.apache.impala.common.ImpalaRuntimeException;
-
/**
* Internal representation of column-related metadata.
* Owned by Catalog instance.
@@ -124,6 +123,7 @@ public class Column {
public static List<FieldSchema> toFieldSchemas(List<Column> columns) {
return Lists.transform(columns, new Function<Column, FieldSchema>() {
+ @Override
public FieldSchema apply(Column column) {
Preconditions.checkNotNull(column.getType());
return new FieldSchema(column.getName(), column.getType().toSql().toLowerCase(),
@@ -133,7 +133,7 @@ public class Column {
}
public static List<String> toColumnNames(List<Column> columns) {
- List<String> colNames = Lists.newArrayList();
+ List<String> colNames = new ArrayList<>();
for (Column col: columns) colNames.add(col.getName());
return colNames;
}
@@ -141,7 +141,7 @@ public class Column {
* Returns a struct type from the table columns passed in as a parameter.
*/
public static StructType columnsToStruct(List<Column> columns) {
- ArrayList<StructField> fields = Lists.newArrayListWithCapacity(columns.size());
+ List<StructField> fields = Lists.newArrayListWithCapacity(columns.size());
for (Column col: columns) {
fields.add(new StructField(col.getName(), col.getType(), col.getComment()));
}
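Since Lists.transform in the hunk above takes an anonymous Guava Function, the added @Override on apply() is the interesting part: it documents intent and makes the compiler flag the exact method if its signature ever drifts from the interface. A minimal sketch, assuming Guava on the classpath:

import java.util.List;

import com.google.common.base.Function;
import com.google.common.collect.Lists;

class TransformSketch {
  static List<Integer> lengths(List<String> names) {
    return Lists.transform(names, new Function<String, Integer>() {
      // @Override checks that this matches
      // Function<String, Integer>.apply(String).
      @Override
      public Integer apply(String name) { return name.length(); }
    });
  }
}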
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Db.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Db.java b/fe/src/main/java/org/apache/impala/catalog/Db.java
index 6bad98c..2e26301 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Db.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Db.java
@@ -17,17 +17,13 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.codec.binary.Base64;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.impala.analysis.ColumnDef;
import org.apache.impala.analysis.KuduPartitionParam;
import org.apache.impala.common.ImpalaException;
@@ -43,11 +39,15 @@ import org.apache.impala.thrift.TGetPartialCatalogObjectResponse;
import org.apache.impala.thrift.TPartialDbInfo;
import org.apache.impala.util.FunctionUtils;
import org.apache.impala.util.PatternMatcher;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
/**
* Internal representation of db-related metadata. Owned by Catalog instance.
@@ -84,7 +84,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
// on this map. When a new Db object is initialized, this list is updated with the
// UDF/UDAs already persisted, if any, in the metastore DB. Functions are sorted in a
// canonical order defined by FunctionResolutionOrder.
- private final HashMap<String, List<Function>> functions_;
+ private final Map<String, List<Function>> functions_;
// If true, this database is an Impala system database.
// (e.g. can't drop it, can't add tables to it, etc).
@@ -94,7 +94,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
thriftDb_ = new TDatabase(name.toLowerCase());
thriftDb_.setMetastore_db(msDb);
tableCache_ = new CatalogObjectCache<Table>();
- functions_ = new HashMap<String, List<Function>>();
+ functions_ = new HashMap<>();
}
public void setIsSystemDb(boolean b) { isSystemDb_ = b; }
@@ -113,7 +113,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
org.apache.hadoop.hive.metastore.api.Database msDb = thriftDb_.metastore_db;
Preconditions.checkNotNull(msDb);
Map<String, String> hmsParams = msDb.getParameters();
- if (hmsParams == null) hmsParams = Maps.newHashMap();
+ if (hmsParams == null) hmsParams = new HashMap<>();
hmsParams.put(k,v);
msDb.setParameters(hmsParams);
}
@@ -147,6 +147,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
/**
* Gets all table names in the table cache.
*/
+ @Override
public List<String> getAllTableNames() {
return Lists.newArrayList(tableCache_.keySet());
}
@@ -281,7 +282,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
}
List<Function> fns = functions_.get(fn.functionName());
if (fns == null) {
- fns = Lists.newArrayList();
+ fns = new ArrayList<>();
functions_.put(fn.functionName(), fns);
}
if (addToDbParams && !addFunctionToDbParams(fn)) return false;
@@ -366,7 +367,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
* This is not thread safe so a higher level lock must be taken while iterating
* over the returned functions.
*/
- public HashMap<String, List<Function>> getAllFunctions() {
+ public Map<String, List<Function>> getAllFunctions() {
return functions_;
}
@@ -374,7 +375,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
* Returns a list of transient functions in this Db.
*/
protected List<Function> getTransientFunctions() {
- List<Function> result = Lists.newArrayList();
+ List<Function> result = new ArrayList<>();
synchronized (functions_) {
for (String fnKey: functions_.keySet()) {
for (Function fn: functions_.get(fnKey)) {
@@ -390,10 +391,11 @@ public class Db extends CatalogObjectImpl implements FeDb {
/**
* Returns all functions that match the pattern of 'matcher'.
*/
+ @Override
public List<Function> getFunctions(TFunctionCategory category,
PatternMatcher matcher) {
Preconditions.checkNotNull(matcher);
- List<Function> result = Lists.newArrayList();
+ List<Function> result = new ArrayList<>();
synchronized (functions_) {
for (Map.Entry<String, List<Function>> fns: functions_.entrySet()) {
if (!matcher.matches(fns.getKey())) continue;
@@ -416,7 +418,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
Preconditions.checkNotNull(name);
synchronized (functions_) {
List<Function> candidates = functions_.get(name);
- if (candidates == null) return Lists.newArrayList();
+ if (candidates == null) return new ArrayList<>();
return FunctionUtils.getVisibleFunctions(candidates);
}
}
@@ -427,7 +429,7 @@ public class Db extends CatalogObjectImpl implements FeDb {
Preconditions.checkNotNull(name);
synchronized (functions_) {
List<Function> candidates = functions_.get(name);
- if (candidates == null) return Lists.newArrayList();
+ if (candidates == null) return new ArrayList<>();
return FunctionUtils.getVisibleFunctionsInCategory(candidates, category);
}
}
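Two recurring moves in the Db.java hunks above: accessors now return the Map/List interfaces instead of concrete classes, and empty results are built with new ArrayList<>(). A compilable sketch with invented names (FunctionMapSketch is illustrative only):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class FunctionMapSketch {
  private final Map<String, List<String>> functions_ = new HashMap<>();

  // Before: public HashMap<String, List<String>> getAllFunctions().
  // Returning the interface hides the concrete map from callers, so a
  // later switch to, say, a ConcurrentHashMap stays a one-line change.
  public Map<String, List<String>> getAllFunctions() { return functions_; }

  public List<String> getOverloads(String name) {
    List<String> candidates = functions_.get(name);
    // Before: if (candidates == null) return Lists.newArrayList();
    if (candidates == null) return new ArrayList<>();
    return candidates;
  }
}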
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/DiskIdMapper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DiskIdMapper.java b/fe/src/main/java/org/apache/impala/catalog/DiskIdMapper.java
index 99dc456..048c3c1 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DiskIdMapper.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DiskIdMapper.java
@@ -17,14 +17,14 @@
package org.apache.impala.catalog;
-import com.google.common.collect.Maps;
-import com.google.common.base.Strings;
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Shorts;
-
import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.primitives.Shorts;
+
/**
* A singleton class that maps HDFS storage-UUIDs to per-host 0-based, sequential disk
* ids. This mapping is internally implemented as a global static object shared
@@ -44,12 +44,12 @@ public class DiskIdMapper {
// Maps each storage ID UUID string returned by the BlockLocation API, to a per-node
// sequential 0-based disk id used by the BE scanners. This assumes that
// the storage ID of a particular disk is unique across all the nodes in the cluster.
- private ConcurrentHashMap<String, Short> storageUuidToDiskId_ =
+ private Map<String, Short> storageUuidToDiskId_ =
new ConcurrentHashMap<String, Short>();
// Per-host ID generator for storage UUID to Short ID mapping. This maps each host
// to the corresponding latest 0-based ID stored in a short.
- private final HashMap<String, Short> storageIdGenerator_ = Maps.newHashMap();
+ private final Map<String, Short> storageIdGenerator_ = new HashMap<>();
/**
* Returns a disk id (0-based) index for storageUuid on host 'host'. Generates a
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
index c50f406..9840a2c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
@@ -17,13 +17,14 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import com.google.common.cache.CacheStats;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -33,9 +34,9 @@ import org.apache.impala.analysis.LiteralExpr;
import org.apache.impala.analysis.NullLiteral;
import org.apache.impala.analysis.PartitionKeyValue;
import org.apache.impala.analysis.ToSqlUtils;
-import org.apache.impala.catalog.local.CatalogdMetaProvider;
import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.catalog.local.CatalogdMetaProvider;
import org.apache.impala.catalog.local.LocalCatalog;
import org.apache.impala.catalog.local.MetaProvider;
import org.apache.impala.service.BackendConfig;
@@ -48,11 +49,10 @@ import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheStats;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
/**
* Static utility functions shared between FeCatalog implementations.
@@ -131,7 +131,7 @@ public abstract class FeCatalogUtils {
// TODO(todd): move to a default method in FeTable in Java8
public static List<TColumnDescriptor> getTColumnDescriptors(FeTable table) {
- List<TColumnDescriptor> colDescs = Lists.<TColumnDescriptor>newArrayList();
+ List<TColumnDescriptor> colDescs = new ArrayList<>();
for (Column col: table.getColumns()) {
colDescs.add(new TColumnDescriptor(col.getName(), col.getType().toThrift()));
}
@@ -226,7 +226,7 @@ public abstract class FeCatalogUtils {
"expected %s values but got %s",
hmsPartitionValues, table.getFullName(),
table.getNumClusteringCols(), hmsPartitionValues.size());
- List<LiteralExpr> keyValues = Lists.newArrayList();
+ List<LiteralExpr> keyValues = new ArrayList<>();
for (String partitionKey: hmsPartitionValues) {
Type type = table.getColumns().get(keyValues.size()).getType();
// Deal with Hive's special NULL partition key.
@@ -257,7 +257,7 @@ public abstract class FeCatalogUtils {
*/
public static String getPartitionName(FeFsPartition partition) {
FeFsTable table = partition.getTable();
- List<String> partitionCols = Lists.newArrayList();
+ List<String> partitionCols = new ArrayList<>();
for (int i = 0; i < table.getNumClusteringCols(); ++i) {
partitionCols.add(table.getColumns().get(i).getName());
}
@@ -269,7 +269,7 @@ public abstract class FeCatalogUtils {
// TODO: this could be a default method in FeFsPartition in Java 8.
public static List<String> getPartitionValuesAsStrings(
FeFsPartition partition, boolean mapNullsToHiveKey) {
- List<String> ret = Lists.newArrayList();
+ List<String> ret = new ArrayList<>();
for (LiteralExpr partValue: partition.getPartitionValues()) {
if (mapNullsToHiveKey) {
ret.add(PartitionKeyValue.getPartitionKeyValueString(
@@ -283,12 +283,12 @@ public abstract class FeCatalogUtils {
// TODO: this could be a default method in FeFsPartition in Java 8.
public static String getConjunctSqlForPartition(FeFsPartition part) {
- List<String> partColSql = Lists.newArrayList();
+ List<String> partColSql = new ArrayList<>();
for (Column partCol: part.getTable().getClusteringColumns()) {
partColSql.add(ToSqlUtils.getIdentSql(partCol.getName()));
}
- List<String> conjuncts = Lists.newArrayList();
+ List<String> conjuncts = new ArrayList<>();
for (int i = 0; i < partColSql.size(); ++i) {
LiteralExpr partVal = part.getPartitionValues().get(i);
String partValSql = partVal.toSql();
@@ -306,7 +306,7 @@ public abstract class FeCatalogUtils {
*/
public static Set<HdfsFileFormat> getFileFormats(
Iterable<? extends FeFsPartition> partitions) {
- Set<HdfsFileFormat> fileFormats = Sets.newHashSet();
+ Set<HdfsFileFormat> fileFormats = new HashSet<>();
for (FeFsPartition partition : partitions) {
fileFormats.add(partition.getFileFormat());
}
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
index 5c619a2..4b18770 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -16,8 +16,10 @@
// under the License.
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -36,7 +38,6 @@ import org.apache.impala.service.BackendConfig;
import org.apache.impala.thrift.TColumn;
import org.apache.impala.thrift.TNetworkAddress;
import org.apache.impala.thrift.TPartitionKeyValue;
-import org.apache.impala.thrift.TResultRow;
import org.apache.impala.thrift.TResultSet;
import org.apache.impala.thrift.TResultSetMetadata;
import org.apache.impala.thrift.TTableStats;
@@ -46,8 +47,6 @@ import org.apache.impala.util.TResultRowBuilder;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
/**
* Frontend interface for interacting with a filesystem-backed table.
@@ -158,7 +157,7 @@ public interface FeFsTable extends FeTable {
* @return a map from value to a set of partitions for which column 'col'
* has that value.
*/
- TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int col);
+ TreeMap<LiteralExpr, Set<Long>> getPartitionValueMap(int col);
/**
* @return the set of partitions which have a null value for column
@@ -242,7 +241,7 @@ public interface FeFsTable extends FeTable {
resultSchema.addToColumns(new TColumn("Path", Type.STRING.toThrift()));
resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
resultSchema.addToColumns(new TColumn("Partition", Type.STRING.toThrift()));
- result.setRows(Lists.<TResultRow>newArrayList());
+ result.setRows(new ArrayList<>());
List<? extends FeFsPartition> orderedPartitions;
if (partitionSet == null) {
@@ -338,14 +337,14 @@ public interface FeFsTable extends FeTable {
// selected.
Random rnd = new Random(randomSeed);
long selectedBytes = 0;
- Map<Long, List<FileDescriptor>> result = Maps.newHashMap();
+ Map<Long, List<FileDescriptor>> result = new HashMap<>();
while (selectedBytes < targetBytes && numFilesRemaining > 0) {
int selectedIdx = Math.abs(rnd.nextInt()) % numFilesRemaining;
FeFsPartition part = parts[selectedIdx];
Long partId = Long.valueOf(part.getId());
List<FileDescriptor> sampleFileIdxs = result.get(partId);
if (sampleFileIdxs == null) {
- sampleFileIdxs = Lists.newArrayList();
+ sampleFileIdxs = new ArrayList<>();
result.put(partId, sampleFileIdxs);
}
FileDescriptor fd = part.getFileDescriptors().get(fileIdxs[selectedIdx]);
@@ -364,7 +363,7 @@ public interface FeFsTable extends FeTable {
*/
public static List<? extends FeFsPartition> getPartitionsFromPartitionSet(
FeFsTable table, List<List<TPartitionKeyValue>> partitionSet) {
- List<Long> partitionIds = Lists.newArrayList();
+ List<Long> partitionIds = new ArrayList<>();
for (List<TPartitionKeyValue> kv : partitionSet) {
PrunablePartition partition = getPartitionFromThriftPartitionSpec(table, kv);
if (partition != null) partitionIds.add(partition.getId());
@@ -381,8 +380,8 @@ public interface FeFsTable extends FeTable {
List<TPartitionKeyValue> partitionSpec) {
// First, build a list of the partition values to search for in the same order they
// are defined in the table.
- List<String> targetValues = Lists.newArrayList();
- Set<String> keys = Sets.newHashSet();
+ List<String> targetValues = new ArrayList<>();
+ Set<String> keys = new HashSet<>();
for (FieldSchema fs: table.getMetaStoreTable().getPartitionKeys()) {
for (TPartitionKeyValue kv: partitionSpec) {
if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
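One subtlety in the FeFsTable hunk changing TreeMap<LiteralExpr, HashSet<Long>> to TreeMap<LiteralExpr, Set<Long>>: Java generics are invariant, so the two map types are unrelated, and the nested value type can only be widened in the interface and every implementation at once (the HdfsTable side changes in step later in this patch). A small sketch of the invariance point, using String keys for brevity:

import java.util.HashSet;
import java.util.Set;
import java.util.TreeMap;

class InvarianceSketch {
  // A TreeMap<String, HashSet<Long>> is NOT a TreeMap<String, Set<Long>>,
  // even though HashSet implements Set: type parameters do not covary.
  static TreeMap<String, Set<Long>> build() {
    TreeMap<String, Set<Long>> m = new TreeMap<>();
    m.put("k", new HashSet<>());  // individual values may still be HashSets
    return m;
  }
}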
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/FeHBaseTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeHBaseTable.java b/fe/src/main/java/org/apache/impala/catalog/FeHBaseTable.java
index e8fd752..8d481a1 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeHBaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeHBaseTable.java
@@ -17,8 +17,12 @@
package org.apache.impala.catalog;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -54,11 +58,7 @@ import org.apache.impala.util.StatsHelper;
import org.apache.impala.util.TResultRowBuilder;
import org.apache.log4j.Logger;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
+import com.google.common.base.Preconditions;
public interface FeHBaseTable extends FeTable {
/**
@@ -161,7 +161,7 @@ public interface FeHBaseTable extends FeTable {
// Populate tmp cols in the order they appear in the Hive metastore.
// We will reorder the cols below.
- List<HBaseColumn> tmpCols = Lists.newArrayList();
+ List<HBaseColumn> tmpCols = new ArrayList<>();
// Store the key column separately.
// TODO: Change this to an ArrayList once we support composite row keys.
HBaseColumn keyCol = null;
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java b/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
index dc5f45d..9f04f25 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
@@ -17,6 +17,7 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -40,7 +41,6 @@ import org.apache.kudu.client.PartitionSchema.RangeSchema;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
/**
* Frontend interface for interacting with a Kudu-backed table.
@@ -100,13 +100,13 @@ public interface FeKuduTable extends FeTable {
public static List<KuduPartitionParam> loadPartitionByParams(
org.apache.kudu.client.KuduTable kuduTable) {
- List<KuduPartitionParam> ret = Lists.newArrayList();
+ List<KuduPartitionParam> ret = new ArrayList<>();
Preconditions.checkNotNull(kuduTable);
Schema tableSchema = kuduTable.getSchema();
PartitionSchema partitionSchema = kuduTable.getPartitionSchema();
for (HashBucketSchema hashBucketSchema: partitionSchema.getHashBucketSchemas()) {
- List<String> columnNames = Lists.newArrayList();
+ List<String> columnNames = new ArrayList<>();
for (int colId: hashBucketSchema.getColumnIds()) {
columnNames.add(getColumnNameById(tableSchema, colId));
}
@@ -116,7 +116,7 @@ public interface FeKuduTable extends FeTable {
RangeSchema rangeSchema = partitionSchema.getRangeSchema();
List<Integer> columnIds = rangeSchema.getColumns();
if (columnIds.isEmpty()) return ret;
- List<String> columnNames = Lists.newArrayList();
+ List<String> columnNames = new ArrayList<>();
for (int colId: columnIds) columnNames.add(getColumnNameById(tableSchema, colId));
// We don't populate the split values because Kudu's API doesn't currently support
// retrieving the split values for range partitions.
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/FeTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeTable.java b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
index a60b827..d395d48 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
@@ -16,7 +16,6 @@
// under the License.
package org.apache.impala.catalog;
-import java.util.ArrayList;
import java.util.List;
import java.util.Set;
@@ -68,7 +67,7 @@ public interface FeTable {
/**
* @return the columns in this table
*/
- ArrayList<Column> getColumns();
+ List<Column> getColumns();
/**
* @return an unmodifiable list of all columns, but with partition columns at the end of
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/Function.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Function.java b/fe/src/main/java/org/apache/impala/catalog/Function.java
index 582733e..a7fe68c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Function.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Function.java
@@ -17,6 +17,7 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.NotImplementedException;
@@ -39,7 +40,6 @@ import org.apache.impala.thrift.TSymbolType;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
/**
@@ -333,7 +333,7 @@ public class Function extends CatalogObjectImpl {
Preconditions.checkArgument(fn.isSetArg_types());
Preconditions.checkArgument(fn.isSetRet_type());
Preconditions.checkArgument(fn.isSetHas_var_args());
- List<Type> argTypes = Lists.newArrayList();
+ List<Type> argTypes = new ArrayList<>();
for (TColumnType t: fn.getArg_types()) {
argTypes.add(Type.fromThrift(t));
}
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
index 9550253..0b81b5d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
@@ -17,8 +17,10 @@
package org.apache.impala.catalog;
-import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -29,10 +31,8 @@ import org.apache.impala.thrift.TTable;
import org.apache.impala.thrift.TTableDescriptor;
import org.apache.impala.thrift.TTableType;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
+import com.codahale.metrics.Timer;
+import com.google.common.base.Preconditions;
/**
* Impala representation of HBase table metadata,
@@ -146,7 +146,7 @@ public class HBaseTable extends Table implements FeHBaseTable {
* Hive returns the columns in order of their declaration for HBase tables.
*/
@Override
- public ArrayList<Column> getColumnsInHiveOrder() {
+ public List<Column> getColumnsInHiveOrder() {
return getColumns();
}
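The getColumnsInHiveOrder() hunk above can only compile if the supertype declaration was relaxed to List<Column> as well (the FeTable hunk earlier in this diff shows the matching getColumns() change; its excerpt cuts off before getColumnsInHiveOrder): an override may narrow a return type, never widen it. A hypothetical mini-hierarchy mirroring that dependency:

import java.util.ArrayList;
import java.util.List;

// Invented names; stands in for the FeTable/HBaseTable pair.
interface TableLike {
  // Before: ArrayList<String> getColumns(); which forced every
  // implementation to expose the concrete class. Now only List is promised.
  List<String> getColumns();
}

class HBaseTableLike implements TableLike {
  private final List<String> cols_ = new ArrayList<>();

  // @Override confirms the signature still satisfies the interface
  // after the return type was relaxed to List.
  @Override
  public List<String> getColumns() { return cols_; }
}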
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java b/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
index 8c5b17d..5bffbd5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
@@ -17,13 +17,14 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.impala.thrift.THdfsFileFormat;
+
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
/**
* Supported HDFS file formats. Every file format specifies:
@@ -234,7 +235,7 @@ public enum HdfsFileFormat {
* Returns a list with all formats for which isComplexTypesSupported() is true.
*/
public static List<HdfsFileFormat> complexTypesFormats() {
- List<HdfsFileFormat> result = Lists.newArrayList();
+ List<HdfsFileFormat> result = new ArrayList<>();
for (HdfsFileFormat f: values()) {
if (f.isComplexTypesSupported()) result.add(f);
}
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index e497b86..e87608f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -19,8 +19,10 @@ package org.apache.impala.catalog;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -607,6 +609,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
public void setNumRows(long numRows) { numRows_ = numRows; }
@Override // FeFsPartition
public long getNumRows() { return numRows_; }
+ @Override
public boolean isMarkedCached() { return isMarkedCached_; }
void markCached() { isMarkedCached_ = true; }
@@ -641,6 +644,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
return PartitionStatsUtil.getPartStatsOrWarn(this);
}
+ @Override
public byte[] getPartitionStatsCompressed() {
return partitionStats_;
}
@@ -850,7 +854,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
msPartition.getParameters()) != null;
hmsParameters_ = msPartition.getParameters();
} else {
- hmsParameters_ = Maps.newHashMap();
+ hmsParameters_ = new HashMap<>();
}
extractAndCompressPartStats();
}
@@ -871,8 +875,8 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
public static HdfsPartition prototypePartition(
HdfsTable table, HdfsStorageDescriptor storageDescriptor) {
- List<LiteralExpr> emptyExprList = Lists.newArrayList();
- List<FileDescriptor> emptyFileDescriptorList = Lists.newArrayList();
+ List<LiteralExpr> emptyExprList = new ArrayList<>();
+ List<FileDescriptor> emptyFileDescriptorList = new ArrayList<>();
return new HdfsPartition(table, null, emptyExprList,
storageDescriptor, emptyFileDescriptorList,
CatalogObjectsConstants.PROTOTYPE_PARTITION_ID, null,
@@ -900,14 +904,14 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
HdfsStorageDescriptor storageDesc = HdfsStorageDescriptor.fromThriftPartition(
thriftPartition, table.getName());
- List<LiteralExpr> literalExpr = Lists.newArrayList();
+ List<LiteralExpr> literalExpr = new ArrayList<>();
if (id != CatalogObjectsConstants.PROTOTYPE_PARTITION_ID) {
- List<Column> clusterCols = Lists.newArrayList();
+ List<Column> clusterCols = new ArrayList<>();
for (int i = 0; i < table.getNumClusteringCols(); ++i) {
clusterCols.add(table.getColumns().get(i));
}
- List<TExprNode> exprNodes = Lists.newArrayList();
+ List<TExprNode> exprNodes = new ArrayList<>();
for (TExpr expr: thriftPartition.getPartitionKeyExprs()) {
for (TExprNode node: expr.getNodes()) {
exprNodes.add(node);
@@ -924,7 +928,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
}
}
- List<HdfsPartition.FileDescriptor> fileDescriptors = Lists.newArrayList();
+ List<HdfsPartition.FileDescriptor> fileDescriptors = new ArrayList<>();
if (thriftPartition.isSetFile_desc()) {
for (THdfsFileDesc desc: thriftPartition.getFile_desc()) {
fileDescriptors.add(HdfsPartition.FileDescriptor.fromThrift(desc));
@@ -949,7 +953,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
if (thriftPartition.isSetHms_parameters()) {
partition.hmsParameters_ = thriftPartition.getHms_parameters();
} else {
- partition.hmsParameters_ = Maps.newHashMap();
+ partition.hmsParameters_ = new HashMap<>();
}
partition.hasIncrementalStats_ = thriftPartition.has_incremental_stats;
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java b/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
index d1c0f9d..f53f3b0 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
@@ -17,6 +17,7 @@
package org.apache.impala.catalog;
+import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
@@ -31,7 +32,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
-import com.google.common.collect.Maps;
import com.google.errorprone.annotations.Immutable;
/**
@@ -100,7 +100,7 @@ public class HdfsStorageDescriptor {
// which means we need to use a default instead.
// We tried long and hard to find default values for delimiters in Hive,
// but could not find them.
- Map<String, Byte> delimMap = Maps.newHashMap();
+ Map<String, Byte> delimMap = new HashMap<>();
for (String delimKey: DELIMITER_KEYS) {
String delimValue = serdeInfo.getParameters().get(delimKey);
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index f2fd897..82b8ef3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -36,6 +36,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -175,21 +176,21 @@ public class HdfsTable extends Table implements FeFsTable {
// Array of sorted maps storing the association between partition values and
// partition ids. There is one sorted map per partition key. It is only populated if
// this table object is stored in ImpaladCatalog.
- private final ArrayList<TreeMap<LiteralExpr, HashSet<Long>>> partitionValuesMap_ =
- Lists.newArrayList();
+ private final List<TreeMap<LiteralExpr, Set<Long>>> partitionValuesMap_ =
+ new ArrayList<>();
// Array of partition id sets that correspond to partitions with null values
// in the partition keys; one set per partition key. It is not populated if the table is
// stored in the catalog server.
- private final ArrayList<HashSet<Long>> nullPartitionIds_ = Lists.newArrayList();
+ private final List<Set<Long>> nullPartitionIds_ = new ArrayList<>();
// Map of partition ids to HdfsPartitions.
- private final HashMap<Long, HdfsPartition> partitionMap_ = Maps.newHashMap();
+ private final Map<Long, HdfsPartition> partitionMap_ = new HashMap<>();
// Map of partition name to HdfsPartition object. Used for speeding up
// table metadata loading. It is only populated if this table object is stored in
// catalog server.
- private final HashMap<String, HdfsPartition> nameToPartitionMap_ = Maps.newHashMap();
+ private final Map<String, HdfsPartition> nameToPartitionMap_ = new HashMap<>();
// The partition used as a prototype when creating new partitions during
// insertion. New partitions inherit file format and other settings from
@@ -223,7 +224,7 @@ public class HdfsTable extends Table implements FeFsTable {
// List of FieldSchemas that correspond to the non-partition columns. Used when
// describing this table and its partitions to the HMS (e.g. as part of an alter table
// operation), when only non-partition columns are required.
- private final List<FieldSchema> nonPartFieldSchemas_ = Lists.newArrayList();
+ private final List<FieldSchema> nonPartFieldSchemas_ = new ArrayList<>();
// Flag to check if the table schema has been loaded. Used as a precondition
// for setAvroSchema().
@@ -448,7 +449,7 @@ public class HdfsTable extends Table implements FeFsTable {
FileMetadataLoadStats loadStats) throws IOException {
boolean supportsBlocks = FileSystemUtil.supportsStorageIds(fs);
Reference<Long> numUnknownDiskIds = new Reference<Long>(Long.valueOf(0));
- List<FileDescriptor> newFileDescs = Lists.newArrayList();
+ List<FileDescriptor> newFileDescs = new ArrayList<>();
while (fileStatusIter.hasNext()) {
LocatedFileStatus fileStatus = fileStatusIter.next();
if (!FileSystemUtil.isValidDataFile(fileStatus)) {
@@ -501,7 +502,7 @@ public class HdfsTable extends Table implements FeFsTable {
if (fileStatuses == null) return loadStats;
boolean supportsBlocks = FileSystemUtil.supportsStorageIds(fs);
Reference<Long> numUnknownDiskIds = new Reference<Long>(Long.valueOf(0));
- List<FileDescriptor> newFileDescs = Lists.newArrayList();
+ List<FileDescriptor> newFileDescs = new ArrayList<>();
// If there is a cached partition mapped to this path, we recompute the block
// locations even if the underlying files have not changed (hasFileChanged()).
// This is done to keep the cached block metadata up to date.
@@ -623,7 +624,7 @@ public class HdfsTable extends Table implements FeFsTable {
}
@Override // FeFsTable
- public TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int i) {
+ public TreeMap<LiteralExpr, Set<Long>> getPartitionValueMap(int i) {
return partitionValuesMap_.get(i);
}
@@ -674,7 +675,7 @@ public class HdfsTable extends Table implements FeFsTable {
public static PrunablePartition getPartition(FeFsTable table,
List<PartitionKeyValue> partitionSpec) {
- List<TPartitionKeyValue> partitionKeyValues = Lists.newArrayList();
+ List<TPartitionKeyValue> partitionKeyValues = new ArrayList<>();
for (PartitionKeyValue kv: partitionSpec) {
Preconditions.checkArgument(kv.isStatic(), "unexpected dynamic partition: %s",
kv);
@@ -739,8 +740,8 @@ public class HdfsTable extends Table implements FeFsTable {
for (int i = 0; i < numClusteringCols_; ++i) {
getColumns().get(i).getStats().setNumNulls(0);
getColumns().get(i).getStats().setNumDistinctValues(0);
- partitionValuesMap_.add(Maps.<LiteralExpr, HashSet<Long>>newTreeMap());
- nullPartitionIds_.add(Sets.<Long>newHashSet());
+ partitionValuesMap_.add(new TreeMap<>());
+ nullPartitionIds_.add(new HashSet<>());
}
}
fileMetadataStats_.init();
@@ -780,7 +781,7 @@ public class HdfsTable extends Table implements FeFsTable {
// Map of partition paths to their corresponding HdfsPartition objects. Populated
// using createPartition() calls. A single partition path can correspond to multiple
// partitions.
- HashMap<Path, List<HdfsPartition>> partsByPath = Maps.newHashMap();
+ Map<Path, List<HdfsPartition>> partsByPath = new HashMap<>();
FsPermissionCache permCache = preloadPermissionsCache(msPartitions);
Path tblLocation = FileSystemUtil.createFullyQualifiedPath(getHdfsBaseDirPath());
@@ -864,7 +865,7 @@ public class HdfsTable extends Table implements FeFsTable {
threadPoolSize);
ExecutorService partitionLoadingPool = Executors.newFixedThreadPool(threadPoolSize);
try {
- List<Future<FileMetadataLoadStats>> pendingMdLoadTasks = Lists.newArrayList();
+ List<Future<FileMetadataLoadStats>> pendingMdLoadTasks = new ArrayList<>();
for (Path p: partsByPath.keySet()) {
FileMetadataLoadRequest blockMdLoadReq =
new FileMetadataLoadRequest(p, partsByPath.get(p), reuseFileMd);
@@ -961,8 +962,8 @@ public class HdfsTable extends Table implements FeFsTable {
public List<HdfsPartition> createAndLoadPartitions(
List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions)
throws CatalogException {
- HashMap<Path, List<HdfsPartition>> partsByPath = Maps.newHashMap();
- List<HdfsPartition> addedParts = Lists.newArrayList();
+ Map<Path, List<HdfsPartition>> partsByPath = new HashMap<>();
+ List<HdfsPartition> addedParts = new ArrayList<>();
FsPermissionCache permCache = preloadPermissionsCache(msPartitions);
for (org.apache.hadoop.hive.metastore.api.Partition partition: msPartitions) {
HdfsPartition hdfsPartition = createPartition(partition.getSd(), partition,
@@ -1063,9 +1064,9 @@ public class HdfsTable extends Table implements FeFsTable {
nullPartitionIds_.get(i).add(Long.valueOf(partition.getId()));
continue;
}
- HashSet<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
+ Set<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
if (partitionIds == null) {
- partitionIds = Sets.newHashSet();
+ partitionIds = new HashSet<>();
partitionValuesMap_.get(i).put(literal, partitionIds);
stats.setNumDistinctValues(stats.getNumDistinctValues() + 1);
}
@@ -1121,7 +1122,7 @@ public class HdfsTable extends Table implements FeFsTable {
}
continue;
}
- HashSet<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
+ Set<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
// If there are multiple partition ids corresponding to a literal, remove
// only this id. Otherwise, remove the <literal, id> pair.
if (partitionIds.size() > 1) partitionIds.remove(partitionId);
@@ -1144,7 +1145,7 @@ public class HdfsTable extends Table implements FeFsTable {
*/
public List<HdfsPartition> dropPartitions(List<HdfsPartition> partitions,
boolean removeCacheDirective) {
- ArrayList<HdfsPartition> droppedPartitions = Lists.newArrayList();
+ List<HdfsPartition> droppedPartitions = new ArrayList<>();
for (HdfsPartition partition: partitions) {
HdfsPartition hdfsPartition = dropPartition(partition, removeCacheDirective);
if (hdfsPartition != null) droppedPartitions.add(hdfsPartition);
@@ -1320,16 +1321,16 @@ public class HdfsTable extends Table implements FeFsTable {
// identify the delta between partitions of the local HdfsTable and the table entry
// in the Hive Metastore. Note: This is a relatively "cheap" operation
// (~.3 secs for 30K partitions).
- Set<String> msPartitionNames = Sets.newHashSet();
+ Set<String> msPartitionNames = new HashSet<>();
msPartitionNames.addAll(client.listPartitionNames(db_.getName(), name_, (short) -1));
// Names of loaded partitions in this table
- Set<String> partitionNames = Sets.newHashSet();
+ Set<String> partitionNames = new HashSet<>();
// Partitions for which file metadata must be loaded, grouped by partition paths.
- Map<Path, List<HdfsPartition>> partitionsToUpdateFileMdByPath = Maps.newHashMap();
+ Map<Path, List<HdfsPartition>> partitionsToUpdateFileMdByPath = new HashMap<>();
// Partitions that need to be dropped and recreated from scratch
- List<HdfsPartition> dirtyPartitions = Lists.newArrayList();
+ List<HdfsPartition> dirtyPartitions = new ArrayList<>();
// Partitions removed from the Hive Metastore.
- List<HdfsPartition> removedPartitions = Lists.newArrayList();
+ List<HdfsPartition> removedPartitions = new ArrayList<>();
// Identify dirty partitions that need to be loaded from the Hive Metastore and
// partitions that no longer exist in the Hive Metastore.
for (HdfsPartition partition: partitionMap_.values()) {
@@ -1392,9 +1393,9 @@ public class HdfsTable extends Table implements FeFsTable {
* Given a set of partition names, returns the corresponding HdfsPartition
* objects grouped by their base directory path.
*/
- private HashMap<Path, List<HdfsPartition>> getPartitionsByPath(
+ private Map<Path, List<HdfsPartition>> getPartitionsByPath(
Collection<String> partitionNames) {
- HashMap<Path, List<HdfsPartition>> partsByPath = Maps.newHashMap();
+ Map<Path, List<HdfsPartition>> partsByPath = new HashMap<>();
for (String partitionName: partitionNames) {
String partName = DEFAULT_PARTITION_NAME;
if (partitionName.length() > 0) {
@@ -1480,7 +1481,7 @@ public class HdfsTable extends Table implements FeFsTable {
|| hasAvroData_) {
// Look for Avro schema in TBLPROPERTIES and in SERDEPROPERTIES, with the latter
// taking precedence.
- List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
+ List<Map<String, String>> schemaSearchLocations = new ArrayList<>();
schemaSearchLocations.add(
getMetaStoreTable().getSd().getSerdeInfo().getParameters());
schemaSearchLocations.add(getMetaStoreTable().getParameters());
@@ -1558,7 +1559,7 @@ public class HdfsTable extends Table implements FeFsTable {
LOG.trace(String.format("Incrementally updating %d/%d partitions.",
partitions.size(), partitionMap_.size()));
}
- Set<String> partitionNames = Sets.newHashSet();
+ Set<String> partitionNames = new HashSet<>();
for (HdfsPartition part: partitions) {
partitionNames.add(part.getPartitionName());
}
@@ -1575,7 +1576,7 @@ public class HdfsTable extends Table implements FeFsTable {
if (partitionNames.isEmpty()) return;
// Load partition metadata from Hive Metastore.
List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions =
- Lists.newArrayList();
+ new ArrayList<>();
msPartitions.addAll(MetaStoreUtil.fetchPartitionsByName(client,
Lists.newArrayList(partitionNames), db_.getName(), name_));
@@ -1652,7 +1653,7 @@ public class HdfsTable extends Table implements FeFsTable {
@Override
protected List<String> getColumnNamesWithHmsStats() {
- List<String> ret = Lists.newArrayList();
+ List<String> ret = new ArrayList<>();
// Only non-partition columns have column stats in the HMS.
for (Column column: getColumns().subList(numClusteringCols_, getColumns().size())) {
ret.add(column.getName().toLowerCase());
@@ -1812,7 +1813,7 @@ public class HdfsTable extends Table implements FeFsTable {
memUsageEstimate += numPartitions * PER_PARTITION_MEM_USAGE_BYTES;
boolean includeIncrementalStats = shouldSendIncrementalStats(numPartitions);
FileMetadataStats stats = new FileMetadataStats();
- Map<Long, THdfsPartition> idToPartition = Maps.newHashMap();
+ Map<Long, THdfsPartition> idToPartition = new HashMap<>();
for (HdfsPartition partition: partitionMap_.values()) {
long id = partition.getId();
if (refPartitions == null || refPartitions.contains(id)) {
@@ -1868,6 +1869,7 @@ public class HdfsTable extends Table implements FeFsTable {
/**
* Returns the set of file formats that the partitions are stored in.
*/
+ @Override
public Set<HdfsFileFormat> getFileFormats() {
// In the case that we have no partitions added to the table yet, it's
// important to add the "prototype" partition as a fallback.
@@ -1882,18 +1884,18 @@ public class HdfsTable extends Table implements FeFsTable {
* partition key column.
*/
public List<List<String>> getPathsWithoutPartitions() throws CatalogException {
- HashSet<List<LiteralExpr>> existingPartitions = new HashSet<List<LiteralExpr>>();
+ Set<List<LiteralExpr>> existingPartitions = new HashSet<>();
// Get the list of partition values of existing partitions in Hive Metastore.
for (HdfsPartition partition: partitionMap_.values()) {
existingPartitions.add(partition.getPartitionValues());
}
- List<String> partitionKeys = Lists.newArrayList();
+ List<String> partitionKeys = new ArrayList<>();
for (int i = 0; i < numClusteringCols_; ++i) {
partitionKeys.add(getColumns().get(i).getName());
}
Path basePath = new Path(hdfsBaseDir_);
- List<List<String>> partitionsNotInHms = new ArrayList<List<String>>();
+ List<List<String>> partitionsNotInHms = new ArrayList<>();
try {
getAllPartitionsNotInHms(basePath, partitionKeys, existingPartitions,
partitionsNotInHms);
@@ -1909,11 +1911,11 @@ public class HdfsTable extends Table implements FeFsTable {
* type compatibility check. Also these partitions are not already part of the table.
*/
private void getAllPartitionsNotInHms(Path path, List<String> partitionKeys,
- HashSet<List<LiteralExpr>> existingPartitions,
+ Set<List<LiteralExpr>> existingPartitions,
List<List<String>> partitionsNotInHms) throws IOException {
FileSystem fs = path.getFileSystem(CONF);
- List<String> partitionValues = Lists.newArrayList();
- List<LiteralExpr> partitionExprs = Lists.newArrayList();
+ List<String> partitionValues = new ArrayList<>();
+ List<LiteralExpr> partitionExprs = new ArrayList<>();
getAllPartitionsNotInHms(path, partitionKeys, 0, fs, partitionValues,
partitionExprs, existingPartitions, partitionsNotInHms);
}
@@ -1934,7 +1936,7 @@ public class HdfsTable extends Table implements FeFsTable {
*/
private void getAllPartitionsNotInHms(Path path, List<String> partitionKeys,
int depth, FileSystem fs, List<String> partitionValues,
- List<LiteralExpr> partitionExprs, HashSet<List<LiteralExpr>> existingPartitions,
+ List<LiteralExpr> partitionExprs, Set<List<LiteralExpr>> existingPartitions,
List<List<String>> partitionsNotInHms) throws IOException {
if (depth == partitionKeys.size()) {
if (existingPartitions.contains(partitionExprs)) {
@@ -2139,8 +2141,8 @@ public class HdfsTable extends Table implements FeFsTable {
* Constructs a partition name from a list of TPartitionKeyValue objects.
*/
public static String constructPartitionName(List<TPartitionKeyValue> partitionSpec) {
- List<String> partitionCols = Lists.newArrayList();
- List<String> partitionVals = Lists.newArrayList();
+ List<String> partitionCols = new ArrayList<>();
+ List<String> partitionVals = new ArrayList<>();
for (TPartitionKeyValue kv: partitionSpec) {
partitionCols.add(kv.getName());
partitionVals.add(kv.getValue());
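
The HdfsTable hunks above all apply the same two rules: declare fields and signatures against the collection interfaces (List, Set, Map) and construct the concrete classes with the diamond operator. Note that the element type of partitionValuesMap_ deliberately stays TreeMap, since getPartitionValueMap() exposes its sorted-map behavior to callers. A minimal sketch of the pattern, with hypothetical field names:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class InterfaceTypedFields {
  // Declared as interfaces; the concrete class is an implementation detail.
  private final List<TreeMap<String, Set<Long>>> valuesByKey = new ArrayList<>();
  private final Map<Long, String> idToName = new HashMap<>();

  // The element type stays concrete where callers depend on TreeMap ordering.
  public TreeMap<String, Set<Long>> valueMap(int i) { return valuesByKey.get(i); }

  public static void main(String[] args) {
    InterfaceTypedFields t = new InterfaceTypedFields();
    t.valuesByKey.add(new TreeMap<>());
    t.idToName.put(0L, "p=1");
    System.out.println(t.valueMap(0) + " " + t.idToName);
  }
}
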
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
index b1bd003..c649808 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
@@ -39,7 +39,7 @@ public class HiveStorageDescriptorFactory {
StorageDescriptor sd = new StorageDescriptor();
sd.setSerdeInfo(new org.apache.hadoop.hive.metastore.api.SerDeInfo());
- sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+ sd.getSerdeInfo().setParameters(new HashMap<>());
// The compressed flag is not used to determine whether the table is compressed or
// not. Instead, we use the input format or the filename.
sd.setCompressed(false);
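
The diamond works here even in an argument position: under Java 8 target typing, new HashMap<>() picks up its type arguments from the parameter's declared type (Java 7's weaker inference could reject this form). A standalone sketch, not part of the commit:

import java.util.HashMap;
import java.util.Map;

public class DiamondAsArgument {
  static void setParameters(Map<String, String> params) {
    System.out.println(params.isEmpty());  // true
  }

  public static void main(String[] args) {
    setParameters(new HashMap<>());  // inferred as HashMap<String, String>
  }
}
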
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/ImpaladTableUsageTracker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladTableUsageTracker.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladTableUsageTracker.java
index 2d6ccda..5db822b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladTableUsageTracker.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladTableUsageTracker.java
@@ -17,8 +17,12 @@
package org.apache.impala.catalog;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
import org.apache.impala.analysis.TableName;
import org.apache.impala.common.JniUtil;
import org.apache.impala.service.BackendConfig;
@@ -32,10 +36,8 @@ import org.apache.log4j.Logger;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Random;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
/**
* Track the names and the number of usages of the recently used tables and report the
@@ -44,7 +46,7 @@ import java.util.Random;
public class ImpaladTableUsageTracker {
private static final Logger LOG = Logger.getLogger(ImpaladTableUsageTracker.class);
private final static long REPORT_INTERVAL_MS = 10000;
- private HashMap<TTableName, TTableUsage> unreportedUsages;
+ private Map<TTableName, TTableUsage> unreportedUsages;
private Thread reportThread_;
private ImpaladTableUsageTracker(boolean enabled) {
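
Besides the HashMap-to-Map field change, the hunk above regroups imports into the order the rest of the commit uses: java.* first, then org.*, then com.*, with a blank line between groups. A minimal sketch of that convention, assuming commons-codec and Guava are on the classpath:

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.codec.binary.Base64;

import com.google.common.base.Preconditions;

public class ImportOrderSketch {
  public static void main(String[] args) {
    List<String> encoded = new ArrayList<>();
    encoded.add(Base64.encodeBase64String("x".getBytes()));
    Preconditions.checkState(!encoded.isEmpty());
    System.out.println(encoded);
  }
}
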
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 2a64f5d..1b5defd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -92,7 +92,7 @@ public class KuduTable extends Table implements FeKuduTable {
private String kuduMasters_;
// Primary key column names, the column names are all in lower case.
- private final List<String> primaryKeyColumnNames_ = Lists.newArrayList();
+ private final List<String> primaryKeyColumnNames_ = new ArrayList<>();
// Partitioning schemes of this Kudu table. Both range and hash-based partitioning are
// supported.
@@ -118,7 +118,7 @@ public class KuduTable extends Table implements FeKuduTable {
* Returns the columns in the order they have been created
*/
@Override
- public ArrayList<Column> getColumnsInHiveOrder() { return getColumns(); }
+ public List<Column> getColumnsInHiveOrder() { return getColumns(); }
public static boolean isKuduTable(org.apache.hadoop.hive.metastore.api.Table msTbl) {
return KUDU_STORAGE_HANDLER.equals(msTbl.getParameters().get(KEY_STORAGE_HANDLER));
@@ -285,7 +285,7 @@ public class KuduTable extends Table implements FeKuduTable {
private static List<KuduPartitionParam> loadPartitionByParamsFromThrift(
List<TKuduPartitionParam> params) {
- List<KuduPartitionParam> ret= Lists.newArrayList();
+ List<KuduPartitionParam> ret= new ArrayList<>();
for (TKuduPartitionParam param: params) {
if (param.isSetBy_hash_param()) {
TKuduPartitionByHashParam hashParam = param.getBy_hash_param();
@@ -318,7 +318,7 @@ public class KuduTable extends Table implements FeKuduTable {
Preconditions.checkNotNull(partitionBy_);
// IMPALA-5154: partitionBy_ may be empty if Kudu table created outside Impala,
// partition_by must be explicitly created because the field is required.
- tbl.partition_by = Lists.newArrayList();
+ tbl.partition_by = new ArrayList<>();
for (KuduPartitionParam partitionParam: partitionBy_) {
tbl.addToPartition_by(partitionParam.toThrift());
}
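
One subtlety in the getColumnsInHiveOrder() change above: an @Override may narrow but never widen its return type, so loosening the override from ArrayList<Column> to List<Column> only compiles because the overridden declaration itself uses the interface type. A minimal sketch with a hypothetical stand-in interface:

import java.util.ArrayList;
import java.util.List;

interface FeTableSketch {  // stand-in for the real supertype
  List<String> getColumnsInHiveOrder();
}

public class KuduTableSketch implements FeTableSketch {
  private final List<String> columns = new ArrayList<>();

  @Override
  public List<String> getColumnsInHiveOrder() { return columns; }  // interface return type

  public static void main(String[] args) {
    FeTableSketch t = new KuduTableSketch();
    System.out.println(t.getColumnsInHiveOrder().size());  // 0
  }
}
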
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
index c93b245..38d304c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
@@ -17,25 +17,25 @@
package org.apache.impala.catalog;
-import org.apache.impala.common.Reference;
-import org.apache.impala.thrift.TPartitionStats;
-import org.apache.impala.common.JniUtil;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.codec.binary.Base64;
import org.apache.impala.common.ImpalaException;
import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.common.Reference;
+import org.apache.impala.thrift.TPartitionStats;
import org.apache.impala.util.CompressionUtil;
import org.apache.impala.util.MetaStoreUtil;
-
-import java.util.List;
-import java.util.Map;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.TSerializer;
import org.apache.thrift.TException;
-import com.google.common.base.Preconditions;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TCompactProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
/**
* Handles serialising and deserialising intermediate statistics from the Hive MetaStore
@@ -188,7 +188,7 @@ public class PartitionStatsUtil {
static private List<String> chunkStringForHms(String data, int chunkLen) {
int idx = 0;
- List<String> ret = Lists.newArrayList();
+ List<String> ret = new ArrayList<>();
while (idx < data.length()) {
int remaining = data.length() - idx;
int chunkSize = (chunkLen > remaining) ? remaining : chunkLen;
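
The chunkStringForHms() loop above splits a string into pieces of at most chunkLen characters; its ternary is equivalent to Math.min. A standalone sketch of the same logic, not part of the commit:

import java.util.ArrayList;
import java.util.List;

public class ChunkSketch {
  static List<String> chunk(String data, int chunkLen) {
    List<String> ret = new ArrayList<>();
    int idx = 0;
    while (idx < data.length()) {
      int chunkSize = Math.min(chunkLen, data.length() - idx);  // same as the ternary above
      ret.add(data.substring(idx, idx + chunkSize));
      idx += chunkSize;
    }
    return ret;
  }

  public static void main(String[] args) {
    System.out.println(chunk("abcdefgh", 3));  // [abc, def, gh]
  }
}
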
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java b/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
index 45cfe12..d49be1d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
@@ -17,10 +17,10 @@
package org.apache.impala.catalog;
+import java.util.ArrayList;
import java.util.List;
import org.apache.impala.thrift.TPrimitiveType;
-import com.google.common.collect.Lists;
public enum PrimitiveType {
INVALID_TYPE("INVALID_TYPE", -1, TPrimitiveType.INVALID_TYPE),
@@ -99,7 +99,7 @@ public enum PrimitiveType {
public TPrimitiveType toThrift() { return thriftType_; }
public static List<TPrimitiveType> toThrift(PrimitiveType[] types) {
- List<TPrimitiveType> result = Lists.newArrayList();
+ List<TPrimitiveType> result = new ArrayList<>();
for (PrimitiveType t: types) {
result.add(t.toThrift());
}
http://git-wip-us.apache.org/repos/asf/impala/blob/049e1056/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java b/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
index 653633a..b64962e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hive.metastore.api.FunctionType;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.ResourceType;
import org.apache.hadoop.hive.metastore.api.ResourceUri;
-
import org.apache.impala.analysis.FunctionName;
import org.apache.impala.analysis.HdfsUri;
import org.apache.impala.common.AnalysisException;
@@ -128,7 +127,7 @@ public class ScalarFunction extends Function {
// Currently we only support certain primitive types.
JavaUdfDataType javaRetType = JavaUdfDataType.getType(fnRetType);
if (javaRetType == JavaUdfDataType.INVALID_TYPE) return null;
- List<Type> fnArgsList = Lists.newArrayList();
+ List<Type> fnArgsList = new ArrayList<>();
for (Class<?> argClass: fnArgs) {
JavaUdfDataType javaUdfType = JavaUdfDataType.getType(argClass);
if (javaUdfType == JavaUdfDataType.INVALID_TYPE) return null;
@@ -166,7 +165,7 @@ public class ScalarFunction extends Function {
* implementations (gen_functions.py). Is there a better way to coordinate this?
*/
public static ScalarFunction createBuiltinOperator(String name,
- ArrayList<Type> argTypes, Type retType) {
+ List<Type> argTypes, Type retType) {
// Operators have a well defined symbol based on the function name and type.
// Convert Add(TINYINT, TINYINT) --> Add_TinyIntVal_TinyIntVal
String beFn = Character.toUpperCase(name.charAt(0)) + name.substring(1);
@@ -219,12 +218,12 @@ public class ScalarFunction extends Function {
}
public static ScalarFunction createBuiltinOperator(String name, String symbol,
- ArrayList<Type> argTypes, Type retType) {
+ List<Type> argTypes, Type retType) {
return createBuiltin(name, symbol, argTypes, false, retType, false);
}
public static ScalarFunction createBuiltin(String name, String symbol,
- ArrayList<Type> argTypes, boolean hasVarArgs, Type retType,
+ List<Type> argTypes, boolean hasVarArgs, Type retType,
boolean userVisible) {
ScalarFunction fn = new ScalarFunction(
new FunctionName(BuiltinsDb.NAME, name), argTypes, retType, hasVarArgs);
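
Widening these parameters from ArrayList<Type> to List<Type> is the caller-facing payoff of the cleanup: any List implementation now satisfies the signature. A minimal sketch with hypothetical names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class WidenedParameterSketch {
  static int arity(List<String> argTypes) {  // previously would have been ArrayList<String>
    return argTypes.size();
  }

  public static void main(String[] args) {
    System.out.println(arity(new ArrayList<>()));               // 0: concrete class still accepted
    System.out.println(arity(Arrays.asList("INT", "BIGINT")));  // 2: fixed-size list now accepted too
  }
}
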
@@ -249,7 +248,7 @@ public class ScalarFunction extends Function {
* TFunctionBinaryType.
*/
public static ScalarFunction createForTesting(String db,
- String fnName, ArrayList<Type> args, Type retType, String uriPath,
+ String fnName, List<Type> args, Type retType, String uriPath,
String symbolName, String initFnSymbol, String closeFnSymbol,
TFunctionBinaryType type) {
ScalarFunction fn = new ScalarFunction(new FunctionName(db, fnName), args,