Posted to commits@hbase.apache.org by sy...@apache.org on 2017/03/10 22:10:04 UTC
[24/50] [abbrv] hbase git commit: HBASE-17532 Replaced explicit type with diamond operator
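
For context, the change below is mechanical: every occurrence of new Foo<Bar>(...) whose type arguments are already fixed by the target type of the assignment or declaration is rewritten as new Foo<>(...), letting the compiler (Java 7 and later) infer the arguments. A minimal standalone sketch of the before/after, illustrative only and not HBase code:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondExample {
  public static void main(String[] args) {
    // Before Java 7: type arguments are spelled out on both sides.
    Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

    // Since Java 7: the diamond (<>) tells the compiler to infer them
    // from the declared type on the left; the resulting bytecode is identical.
    Map<String, List<Integer>> concise = new HashMap<>();

    verbose.put("a", new ArrayList<Integer>());
    concise.put("a", new ArrayList<>());
    System.out.println(verbose.equals(concise)); // prints: true
  }
}

The same inference works when the target type comes from a generic method's type parameter, which is why calls like new ObserverContext<>(user) in the first hunk below still compile to ObserverContext<T>.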
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
index 52f2b95..fc80768 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
@@ -124,7 +124,7 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
public static <T extends CoprocessorEnvironment> ObserverContext<T> createAndPrepare(
T env, ObserverContext<T> context) {
if (context == null) {
- context = new ObserverContext<T>(RpcServer.getRequestUser());
+ context = new ObserverContext<>(RpcServer.getRequestUser());
}
context.prepare(env);
return context;
@@ -146,7 +146,7 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
public static <T extends CoprocessorEnvironment> ObserverContext<T> createAndPrepare(
T env, ObserverContext<T> context, User user) {
if (context == null) {
- context = new ObserverContext<T>(user);
+ context = new ObserverContext<>(user);
}
context.prepare(env);
return context;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
index bfcf486..a00ccd9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
@@ -107,8 +107,7 @@ public class ForeignException extends IOException {
// if there is no stack trace, ignore it and just return the message
if (trace == null) return null;
// build the stack trace for the message
- List<StackTraceElementMessage> pbTrace =
- new ArrayList<StackTraceElementMessage>(trace.length);
+ List<StackTraceElementMessage> pbTrace = new ArrayList<>(trace.length);
for (StackTraceElement elem : trace) {
StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
stackBuilder.setDeclaringClass(elem.getClassName());
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
index f5fc979..f339e9e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
@@ -42,8 +42,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare {
private static final Log LOG = LogFactory.getLog(ForeignExceptionDispatcher.class);
protected final String name;
- protected final List<ForeignExceptionListener> listeners =
- new ArrayList<ForeignExceptionListener>();
+ protected final List<ForeignExceptionListener> listeners = new ArrayList<>();
private ForeignException exception;
public ForeignExceptionDispatcher(String name) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
index 403244f..df7653f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
@@ -59,8 +59,7 @@ public class ExecutorService {
private static final Log LOG = LogFactory.getLog(ExecutorService.class);
// holds all the executors created in a map addressable by their names
- private final ConcurrentHashMap<String, Executor> executorMap =
- new ConcurrentHashMap<String, Executor>();
+ private final ConcurrentHashMap<String, Executor> executorMap = new ConcurrentHashMap<>();
// Name of the server hosting this executor service.
private final String servername;
@@ -164,7 +163,7 @@ public class ExecutorService {
// the thread pool executor that services the requests
final TrackingThreadPoolExecutor threadPoolExecutor;
// work queue to use - unbounded queue
- final BlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>();
+ final BlockingQueue<Runnable> q = new LinkedBlockingQueue<>();
private final String name;
private static final AtomicLong seqids = new AtomicLong(0);
private final long id;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 625d01f..48745ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -85,9 +85,9 @@ public class FavoredNodeAssignmentHelper {
final RackManager rackManager) {
this.servers = servers;
this.rackManager = rackManager;
- this.rackToRegionServerMap = new HashMap<String, List<ServerName>>();
- this.regionServerToRackMap = new HashMap<String, String>();
- this.uniqueRackList = new ArrayList<String>();
+ this.rackToRegionServerMap = new HashMap<>();
+ this.regionServerToRackMap = new HashMap<>();
+ this.uniqueRackList = new ArrayList<>();
this.random = new Random();
}
@@ -122,7 +122,7 @@ public class FavoredNodeAssignmentHelper {
public static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Connection connection) throws IOException {
- List<Put> puts = new ArrayList<Put>();
+ List<Put> puts = new ArrayList<>();
for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
if (put != null) {
@@ -142,7 +142,7 @@ public class FavoredNodeAssignmentHelper {
public static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Configuration conf) throws IOException {
- List<Put> puts = new ArrayList<Put>();
+ List<Put> puts = new ArrayList<>();
for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
if (put != null) {
@@ -226,7 +226,7 @@ public class FavoredNodeAssignmentHelper {
// The regions should be distributed proportionately to the racksizes
void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) {
- List<String> rackList = new ArrayList<String>(rackToRegionServerMap.size());
+ List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
rackList.addAll(rackToRegionServerMap.keySet());
int rackIndex = random.nextInt(rackList.size());
int maxRackSize = 0;
@@ -266,7 +266,7 @@ public class FavoredNodeAssignmentHelper {
if (assignmentMap != null) {
List<HRegionInfo> regionsForServer = assignmentMap.get(currentServer);
if (regionsForServer == null) {
- regionsForServer = new ArrayList<HRegionInfo>();
+ regionsForServer = new ArrayList<>();
assignmentMap.put(currentServer, regionsForServer);
}
regionsForServer.add(regionInfo);
@@ -284,8 +284,7 @@ public class FavoredNodeAssignmentHelper {
Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
Map<HRegionInfo, ServerName> primaryRSMap) {
- Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
- new HashMap<HRegionInfo, ServerName[]>();
+ Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
// Get the target region and its primary region server rack
HRegionInfo regionInfo = entry.getKey();
@@ -317,12 +316,11 @@ public class FavoredNodeAssignmentHelper {
private Map<ServerName, Set<HRegionInfo>> mapRSToPrimaries(
Map<HRegionInfo, ServerName> primaryRSMap) {
- Map<ServerName, Set<HRegionInfo>> primaryServerMap =
- new HashMap<ServerName, Set<HRegionInfo>>();
+ Map<ServerName, Set<HRegionInfo>> primaryServerMap = new HashMap<>();
for (Entry<HRegionInfo, ServerName> e : primaryRSMap.entrySet()) {
Set<HRegionInfo> currentSet = primaryServerMap.get(e.getValue());
if (currentSet == null) {
- currentSet = new HashSet<HRegionInfo>();
+ currentSet = new HashSet<>();
}
currentSet.add(e.getKey());
primaryServerMap.put(e.getValue(), currentSet);
@@ -341,8 +339,7 @@ public class FavoredNodeAssignmentHelper {
Map<HRegionInfo, ServerName> primaryRSMap) {
Map<ServerName, Set<HRegionInfo>> serverToPrimaries =
mapRSToPrimaries(primaryRSMap);
- Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
- new HashMap<HRegionInfo, ServerName[]>();
+ Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
for (Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
// Get the target region and its primary region server rack
@@ -381,11 +378,11 @@ public class FavoredNodeAssignmentHelper {
// Random to choose the secondary and tertiary region server
// from another rack to place the secondary and tertiary
// Random to choose one rack except for the current rack
- Set<String> rackSkipSet = new HashSet<String>();
+ Set<String> rackSkipSet = new HashSet<>();
rackSkipSet.add(primaryRack);
String secondaryRack = getOneRandomRack(rackSkipSet);
List<ServerName> serverList = getServersFromRack(secondaryRack);
- Set<ServerName> serverSet = new HashSet<ServerName>();
+ Set<ServerName> serverSet = new HashSet<>();
serverSet.addAll(serverList);
ServerName[] favoredNodes;
if (serverList.size() >= 2) {
@@ -393,7 +390,7 @@ public class FavoredNodeAssignmentHelper {
// Skip the secondary for the tertiary placement
// skip the servers which share the primary already
Set<HRegionInfo> primaries = serverToPrimaries.get(primaryRS);
- Set<ServerName> skipServerSet = new HashSet<ServerName>();
+ Set<ServerName> skipServerSet = new HashSet<>();
while (true) {
ServerName[] secondaryAndTertiary = null;
if (primaries.size() > 1) {
@@ -423,7 +420,7 @@ public class FavoredNodeAssignmentHelper {
}
secondaryRack = getOneRandomRack(rackSkipSet);
serverList = getServersFromRack(secondaryRack);
- serverSet = new HashSet<ServerName>();
+ serverSet = new HashSet<>();
serverSet.addAll(serverList);
}
@@ -452,7 +449,7 @@ public class FavoredNodeAssignmentHelper {
// Pick the tertiary
if (getTotalNumberOfRacks() == 2) {
// Pick the tertiary from the same rack of the primary RS
- Set<ServerName> serverSkipSet = new HashSet<ServerName>();
+ Set<ServerName> serverSkipSet = new HashSet<>();
serverSkipSet.add(primaryRS);
favoredNodes[1] = getOneRandomServer(primaryRack, serverSkipSet);
} else {
@@ -478,7 +475,7 @@ public class FavoredNodeAssignmentHelper {
} else {
// Randomly select two region servers from the server list and make sure
// they do not overlap with the primary region server;
- Set<ServerName> serverSkipSet = new HashSet<ServerName>();
+ Set<ServerName> serverSkipSet = new HashSet<>();
serverSkipSet.add(primaryRS);
// Place the secondary RS
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
index f0af0d0..6e7bf0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
@@ -87,7 +87,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
//TODO. Look at whether the Stochastic loadbalancer can be integrated with this
- List<RegionPlan> plans = new ArrayList<RegionPlan>();
+ List<RegionPlan> plans = new ArrayList<>();
//perform a scan of the meta to get the latest updates (if any)
SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection());
@@ -97,10 +97,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
LOG.warn("Not running balancer since exception was thrown " + ie);
return plans;
}
- Map<ServerName, ServerName> serverNameToServerNameWithoutCode =
- new HashMap<ServerName, ServerName>();
- Map<ServerName, ServerName> serverNameWithoutCodeToServerName =
- new HashMap<ServerName, ServerName>();
+ Map<ServerName, ServerName> serverNameToServerNameWithoutCode = new HashMap<>();
+ Map<ServerName, ServerName> serverNameWithoutCodeToServerName = new HashMap<>();
ServerManager serverMgr = super.services.getServerManager();
for (ServerName sn: serverMgr.getOnlineServersList()) {
ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE);
@@ -189,7 +187,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers);
Map<ServerName,List<HRegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
List<HRegionInfo> regionsWithNoFavoredNodes = segregatedRegions.getSecond();
- assignmentMap = new HashMap<ServerName, List<HRegionInfo>>();
+ assignmentMap = new HashMap<>();
roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes,
servers);
// merge the assignment maps
@@ -225,9 +223,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
}
}
}
- List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
+ List<HRegionInfo> regions = new ArrayList<>(1);
regions.add(regionInfo);
- Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>(1);
+ Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<>(1);
primaryRSMap.put(regionInfo, primary);
assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap);
return primary;
@@ -241,9 +239,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
private Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>>
segregateRegionsAndAssignRegionsWithFavoredNodes(List<HRegionInfo> regions,
List<ServerName> availableServers) {
- Map<ServerName, List<HRegionInfo>> assignmentMapForFavoredNodes =
- new HashMap<ServerName, List<HRegionInfo>>(regions.size() / 2);
- List<HRegionInfo> regionsWithNoFavoredNodes = new ArrayList<HRegionInfo>(regions.size()/2);
+ Map<ServerName, List<HRegionInfo>> assignmentMapForFavoredNodes = new HashMap<>(regions.size() / 2);
+ List<HRegionInfo> regionsWithNoFavoredNodes = new ArrayList<>(regions.size()/2);
for (HRegionInfo region : regions) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
ServerName primaryHost = null;
@@ -272,8 +269,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
regionsWithNoFavoredNodes.add(region);
}
}
- return new Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>>(
- assignmentMapForFavoredNodes, regionsWithNoFavoredNodes);
+ return new Pair<>(assignmentMapForFavoredNodes, regionsWithNoFavoredNodes);
}
// Do a check of the hostname and port and return the servername from the servers list
@@ -316,7 +312,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
HRegionInfo region, ServerName host) {
List<HRegionInfo> regionsOnServer = null;
if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) {
- regionsOnServer = new ArrayList<HRegionInfo>();
+ regionsOnServer = new ArrayList<>();
assignmentMapForFavoredNodes.put(host, regionsOnServer);
}
regionsOnServer.add(region);
@@ -329,7 +325,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper,
Map<ServerName, List<HRegionInfo>> assignmentMap,
List<HRegionInfo> regions, List<ServerName> servers) throws IOException {
- Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
+ Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<>();
// figure the primary RSs
assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap);
@@ -347,7 +343,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
for (HRegionInfo region : regions) {
// Store the favored nodes without startCode for the ServerName objects
// We don't care about the startcode, but only the hostname really
- List<ServerName> favoredNodesForRegion = new ArrayList<ServerName>(3);
+ List<ServerName> favoredNodesForRegion = new ArrayList<>(3);
ServerName sn = primaryRSMap.get(region);
favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(),
ServerName.NON_STARTCODE));
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
index f24d9fc..ff6d9e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
@@ -46,7 +46,7 @@ public class FavoredNodesPlan {
}
public FavoredNodesPlan() {
- favoredNodesMap = new ConcurrentHashMap<String, List<ServerName>>();
+ favoredNodesMap = new ConcurrentHashMap<>();
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java
index de53bd9..cfc0640 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java
@@ -38,7 +38,7 @@ public class HttpRequestLog {
private static final HashMap<String, String> serverToComponent;
static {
- serverToComponent = new HashMap<String, String>();
+ serverToComponent = new HashMap<>();
serverToComponent.put("master", "master");
serverToComponent.put("region", "regionserver");
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 3ce2f09..c7e1153 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -163,9 +163,8 @@ public class HttpServer implements FilterContainer {
protected final WebAppContext webAppContext;
protected final boolean findPort;
- protected final Map<ServletContextHandler, Boolean> defaultContexts =
- new HashMap<ServletContextHandler, Boolean>();
- protected final List<String> filterNames = new ArrayList<String>();
+ protected final Map<ServletContextHandler, Boolean> defaultContexts = new HashMap<>();
+ protected final List<String> filterNames = new ArrayList<>();
static final String STATE_DESCRIPTION_ALIVE = " - alive";
static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
@@ -555,7 +554,7 @@ public class HttpServer implements FilterContainer {
addDefaultApps(contexts, appDir, conf);
addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
- Map<String, String> params = new HashMap<String, String>();
+ Map<String, String> params = new HashMap<>();
params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", "DENY"));
addGlobalFilter("clickjackingprevention",
ClickjackingPreventionFilter.class.getName(), params);
@@ -906,7 +905,7 @@ public class HttpServer implements FilterContainer {
private void initSpnego(Configuration conf, String hostName,
String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey,
String signatureSecretKeyFileKey) throws IOException {
- Map<String, String> params = new HashMap<String, String>();
+ Map<String, String> params = new HashMap<>();
String principalInConf = getOrEmptyString(conf, usernameConfKey);
if (!principalInConf.isEmpty()) {
params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal(
@@ -1302,7 +1301,7 @@ public class HttpServer implements FilterContainer {
@Override
public Map<String, String[]> getParameterMap() {
- Map<String, String[]> result = new HashMap<String,String[]>();
+ Map<String, String[]> result = new HashMap<>();
Map<String, String[]> raw = rawRequest.getParameterMap();
for (Map.Entry<String,String[]> item: raw.entrySet()) {
String[] rawValue = item.getValue();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
index 710676d..7c3204b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
@@ -124,7 +124,7 @@ public class StaticUserWebFilter extends FilterInitializer {
@Override
public void initFilter(FilterContainer container, Configuration conf) {
- HashMap<String, String> options = new HashMap<String, String>();
+ HashMap<String, String> options = new HashMap<>();
String username = getUsernameFromConf(conf);
options.put(HBASE_HTTP_STATIC_USER, username);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
index 3caf67f..ca0dfbc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
@@ -426,7 +426,7 @@ public class FileLink {
protected void setLocations(Path originPath, Path... alternativePaths) {
assert this.locations == null : "Link locations already set";
- List<Path> paths = new ArrayList<Path>(alternativePaths.length +1);
+ List<Path> paths = new ArrayList<>(alternativePaths.length +1);
if (originPath != null) {
paths.add(originPath);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index 5128662..cdc5be1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -509,7 +509,7 @@ public class HFileLink extends FileLink {
String tableSubstr = name.substring(separatorIndex + 1)
.replace('=', TableName.NAMESPACE_DELIM);
TableName linkTableName = TableName.valueOf(tableSubstr);
- return new Pair<TableName, String>(linkTableName, linkRegionName);
+ return new Pair<>(linkTableName, linkRegionName);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
index a6ee6da..c64cdf7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
@@ -494,7 +494,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
* @return A CompletableFuture that holds the acked length after flushing.
*/
public CompletableFuture<Long> flush(boolean syncBlock) {
- CompletableFuture<Long> future = new CompletableFuture<Long>();
+ CompletableFuture<Long> future = new CompletableFuture<>();
if (eventLoop.inEventLoop()) {
flush0(future, syncBlock);
} else {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
index 875ff77..3eaacc4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
@@ -682,8 +682,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
try {
stat = namenode.create(src,
FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
- new EnumSetWritable<CreateFlag>(
- overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
+ new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
createParent, replication, blockSize, CryptoProtocolVersion.supported());
} catch (Exception e) {
if (e instanceof RemoteException) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 9335ef6..5c306c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -205,8 +205,7 @@ public class BlockCacheUtil {
/**
* Map by filename. Use concurrent utils because we want our Map and contained blocks sorted.
*/
- private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
- new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
+ private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile = new ConcurrentSkipListMap<>();
FastLongHistogram hist = new FastLongHistogram();
/**
@@ -217,7 +216,7 @@ public class BlockCacheUtil {
if (isFull()) return true;
NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
if (set == null) {
- set = new ConcurrentSkipListSet<CachedBlock>();
+ set = new ConcurrentSkipListSet<>();
this.cachedBlockByFile.put(cb.getFilename(), set);
}
set.add(cb);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
index 5d2d54a..3140150 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
@@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
*/
@InterfaceAudience.Private
public class CacheableDeserializerIdManager {
- private static final Map<Integer, CacheableDeserializer<Cacheable>> registeredDeserializers =
- new HashMap<Integer, CacheableDeserializer<Cacheable>>();
+ private static final Map<Integer, CacheableDeserializer<Cacheable>> registeredDeserializers = new HashMap<>();
private static final AtomicInteger identifier = new AtomicInteger(0);
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index a50566a..96dfcbd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -70,7 +70,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
BloomFilterChunk chunk;
}
- private Queue<ReadyChunk> readyChunks = new LinkedList<ReadyChunk>();
+ private Queue<ReadyChunk> readyChunks = new LinkedList<>();
/** The first key in the current Bloom filter chunk. */
private byte[] firstKeyInChunk = null;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 0e07d6e..c5b334a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -638,7 +638,7 @@ public class HFile {
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
- private final SortedMap<byte [], byte []> map = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+ private final SortedMap<byte [], byte []> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
public FileInfo() {
super();
@@ -894,7 +894,7 @@ public class HFile {
*/
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
- List<Path> regionHFiles = new ArrayList<Path>();
+ List<Path> regionHFiles = new ArrayList<>();
PathFilter dirFilter = new FSUtils.DirFilter(fs);
FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
for(FileStatus dir : familyDirs) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 1970ade..fba15ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1406,8 +1406,7 @@ public class HFileBlock implements Cacheable {
* next block's header seems unnecessary given we usually get the block size
* from the hfile index. Review!
*/
- private AtomicReference<PrefetchedHeader> prefetchedHeader =
- new AtomicReference<PrefetchedHeader>(new PrefetchedHeader());
+ private AtomicReference<PrefetchedHeader> prefetchedHeader = new AtomicReference<>(new PrefetchedHeader());
/** The size of the file we are reading from, or -1 if unknown. */
protected long fileSize;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 575c074..b36c292 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -239,7 +239,7 @@ public class HFileBlockIndex {
private Cell[] blockKeys;
/** Pre-computed mid-key */
- private AtomicReference<Cell> midKey = new AtomicReference<Cell>();
+ private AtomicReference<Cell> midKey = new AtomicReference<>();
/** Needed doing lookup on blocks. */
private CellComparator comparator;
@@ -741,7 +741,7 @@ public class HFileBlockIndex {
// keys[numEntries] = Infinity, then we are maintaining an invariant that
// keys[low - 1] < key < keys[high + 1] while narrowing down the range.
ByteBufferKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferKeyOnlyKeyValue();
- ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>();
+ ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<>();
while (low <= high) {
mid = (low + high) >>> 1;
@@ -1402,20 +1402,20 @@ public class HFileBlockIndex {
static class BlockIndexChunk {
/** First keys of the key range corresponding to each index entry. */
- private final List<byte[]> blockKeys = new ArrayList<byte[]>();
+ private final List<byte[]> blockKeys = new ArrayList<>();
/** Block offset in backing stream. */
- private final List<Long> blockOffsets = new ArrayList<Long>();
+ private final List<Long> blockOffsets = new ArrayList<>();
/** On-disk data sizes of lower-level data or index blocks. */
- private final List<Integer> onDiskDataSizes = new ArrayList<Integer>();
+ private final List<Integer> onDiskDataSizes = new ArrayList<>();
/**
* The cumulative number of sub-entries, i.e. entries on deeper-level block
* index entries. numSubEntriesAt[i] is the number of sub-entries in the
* blocks corresponding to this chunk's entries #0 through #i inclusively.
*/
- private final List<Long> numSubEntriesAt = new ArrayList<Long>();
+ private final List<Long> numSubEntriesAt = new ArrayList<>();
/**
* The offset of the next entry to be added, relative to the end of the
@@ -1434,8 +1434,7 @@ public class HFileBlockIndex {
* records in a "non-root" format block. These offsets are relative to the
* end of this secondary index.
*/
- private final List<Integer> secondaryIndexOffsetMarks =
- new ArrayList<Integer>();
+ private final List<Integer> secondaryIndexOffsetMarks = new ArrayList<>();
/**
* Adds a new entry to this block index chunk.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 1710379..030a25e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -122,7 +122,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
*/
private byte[] row = null;
- private List<Path> files = new ArrayList<Path>();
+ private List<Path> files = new ArrayList<>();
private int count;
private static final String FOUR_SPACES = " ";
@@ -232,7 +232,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
if (verbose) {
System.out.println("checkMobIntegrity is enabled");
}
- mobFileLocations = new HashMap<String, List<Path>>();
+ mobFileLocations = new HashMap<>();
}
cmd.getArgList().forEach((file) -> files.add(new Path(file)));
@@ -372,8 +372,8 @@ public class HFilePrettyPrinter extends Configured implements Tool {
HFileScanner scanner, byte[] row) throws IOException {
Cell pCell = null;
FileSystem fs = FileSystem.get(getConf());
- Set<String> foundMobFiles = new LinkedHashSet<String>(FOUND_MOB_FILES_CACHE_CAPACITY);
- Set<String> missingMobFiles = new LinkedHashSet<String>(MISSING_MOB_FILES_CACHE_CAPACITY);
+ Set<String> foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY);
+ Set<String> missingMobFiles = new LinkedHashSet<>(MISSING_MOB_FILES_CACHE_CAPACITY);
do {
Cell cell = scanner.getCell();
if (row != null && row.length != 0) {
@@ -469,7 +469,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
String tableName = tn.getNameAsString();
List<Path> locations = mobFileLocations.get(tableName);
if (locations == null) {
- locations = new ArrayList<Path>(2);
+ locations = new ArrayList<>(2);
locations.add(MobUtils.getMobFamilyPath(getConf(), tn, family));
locations.add(HFileArchiveUtil.getStoreArchivePath(getConf(), tn,
MobUtils.getMobRegionInfo(tn).getEncodedName(), family));
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index c92d77d..4e8cbaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -138,7 +138,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
* Blocks read from the load-on-open section, excluding data root index, meta
* index, and file info.
*/
- private List<HFileBlock> loadOnOpenBlocks = new ArrayList<HFileBlock>();
+ private List<HFileBlock> loadOnOpenBlocks = new ArrayList<>();
/** Minimum minor version supported by this HFile format */
static final int MIN_MINOR_VERSION = 0;
@@ -493,7 +493,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// buffer backed keyonlyKV
private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue();
// A pair for reusing in blockSeek() so that we don't create a lot of garbage objects
- final ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>();
+ final ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<>();
/**
* The next indexed key is to keep track of the indexed key of the next data block.
@@ -506,7 +506,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Current block being used
protected HFileBlock curBlock;
// Previous blocks that were used in the course of the read
- protected final ArrayList<HFileBlock> prevBlocks = new ArrayList<HFileBlock>();
+ protected final ArrayList<HFileBlock> prevBlocks = new ArrayList<>();
public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks,
final boolean pread, final boolean isCompaction) {
@@ -975,7 +975,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
public Cell getKey() {
assertSeeked();
// Create a new object so that this getKey is cached as firstKey, lastKey
- ObjectIntPair<ByteBuffer> keyPair = new ObjectIntPair<ByteBuffer>();
+ ObjectIntPair<ByteBuffer> keyPair = new ObjectIntPair<>();
blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, keyPair);
ByteBuffer keyBuf = keyPair.getFirst();
if (keyBuf.hasArray()) {
@@ -996,7 +996,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
public ByteBuffer getValue() {
assertSeeked();
// Okie to create new Pair. Not used in hot path
- ObjectIntPair<ByteBuffer> valuePair = new ObjectIntPair<ByteBuffer>();
+ ObjectIntPair<ByteBuffer> valuePair = new ObjectIntPair<>();
this.blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen,
currValueLen, valuePair);
ByteBuffer valBuf = valuePair.getFirst().duplicate();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 8a2d238..6a20b99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -91,10 +91,10 @@ public class HFileWriterImpl implements HFile.Writer {
protected final CellComparator comparator;
/** Meta block names. */
- protected List<byte[]> metaNames = new ArrayList<byte[]>();
+ protected List<byte[]> metaNames = new ArrayList<>();
/** {@link Writable}s representing meta block data. */
- protected List<Writable> metaData = new ArrayList<Writable>();
+ protected List<Writable> metaData = new ArrayList<>();
/**
* First cell in a block.
@@ -132,7 +132,7 @@ public class HFileWriterImpl implements HFile.Writer {
public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1;
/** Inline block writers for multi-level block index and compound Blooms. */
- private List<InlineBlockWriter> inlineBlockWriters = new ArrayList<InlineBlockWriter>();
+ private List<InlineBlockWriter> inlineBlockWriters = new ArrayList<>();
/** block writer */
protected HFileBlock.Writer blockWriter;
@@ -153,7 +153,7 @@ public class HFileWriterImpl implements HFile.Writer {
private Cell lastCellOfPreviousBlock = null;
/** Additional data items to be written to the "load-on-open" section. */
- private List<BlockWritable> additionalLoadOnOpenData = new ArrayList<BlockWritable>();
+ private List<BlockWritable> additionalLoadOnOpenData = new ArrayList<>();
protected long maxMemstoreTS = 0;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 61deef5..838fa41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -42,8 +42,7 @@ public class PrefetchExecutor {
private static final Log LOG = LogFactory.getLog(PrefetchExecutor.class);
/** Futures for tracking block prefetch activity */
- private static final Map<Path,Future<?>> prefetchFutures =
- new ConcurrentSkipListMap<Path,Future<?>>();
+ private static final Map<Path,Future<?>> prefetchFutures = new ConcurrentSkipListMap<>();
/** Executor pool shared among all HFiles for block prefetch */
private static final ScheduledExecutorService prefetchExecutorPool;
/** Delay before beginning prefetch */
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 1bcdfc4..cb23ca9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -142,8 +142,7 @@ public class BucketCache implements BlockCache, HeapSize {
* to the BucketCache. It then updates the ramCache and backingMap accordingly.
*/
@VisibleForTesting
- final ArrayList<BlockingQueue<RAMQueueEntry>> writerQueues =
- new ArrayList<BlockingQueue<RAMQueueEntry>>();
+ final ArrayList<BlockingQueue<RAMQueueEntry>> writerQueues = new ArrayList<>();
@VisibleForTesting
final WriterThread[] writerThreads;
@@ -151,7 +150,7 @@ public class BucketCache implements BlockCache, HeapSize {
private volatile boolean freeInProgress = false;
private final Lock freeSpaceLock = new ReentrantLock();
- private UniqueIndexMap<Integer> deserialiserMap = new UniqueIndexMap<Integer>();
+ private UniqueIndexMap<Integer> deserialiserMap = new UniqueIndexMap<>();
private final AtomicLong realCacheSize = new AtomicLong(0);
private final AtomicLong heapSize = new AtomicLong(0);
@@ -191,7 +190,7 @@ public class BucketCache implements BlockCache, HeapSize {
final IdReadWriteLock offsetLock = new IdReadWriteLock();
private final NavigableSet<BlockCacheKey> blocksByHFile =
- new ConcurrentSkipListSet<BlockCacheKey>(new Comparator<BlockCacheKey>() {
+ new ConcurrentSkipListSet<>(new Comparator<BlockCacheKey>() {
@Override
public int compare(BlockCacheKey a, BlockCacheKey b) {
int nameComparison = a.getHfileName().compareTo(b.getHfileName());
@@ -240,13 +239,13 @@ public class BucketCache implements BlockCache, HeapSize {
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
for (int i = 0; i < writerThreads.length; ++i) {
- writerQueues.add(new ArrayBlockingQueue<RAMQueueEntry>(writerQLen));
+ writerQueues.add(new ArrayBlockingQueue<>(writerQLen));
}
assert writerQueues.size() == writerThreads.length;
- this.ramCache = new ConcurrentHashMap<BlockCacheKey, RAMQueueEntry>();
+ this.ramCache = new ConcurrentHashMap<>();
- this.backingMap = new ConcurrentHashMap<BlockCacheKey, BucketEntry>((int) blockNumCapacity);
+ this.backingMap = new ConcurrentHashMap<>((int) blockNumCapacity);
if (ioEngine.isPersistent() && persistencePath != null) {
try {
@@ -756,7 +755,7 @@ public class BucketCache implements BlockCache, HeapSize {
}
}
- PriorityQueue<BucketEntryGroup> bucketQueue = new PriorityQueue<BucketEntryGroup>(3);
+ PriorityQueue<BucketEntryGroup> bucketQueue = new PriorityQueue<>(3);
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
@@ -841,7 +840,7 @@ public class BucketCache implements BlockCache, HeapSize {
}
public void run() {
- List<RAMQueueEntry> entries = new ArrayList<RAMQueueEntry>();
+ List<RAMQueueEntry> entries = new ArrayList<>();
try {
while (cacheEnabled && writerEnabled) {
try {
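
Worth noting in the BucketCache hunk above: the blocksByHFile change keeps the explicit <BlockCacheKey> on the anonymous Comparator while the enclosing ConcurrentSkipListSet gets the diamond. Under Java 7/8 the diamond is not allowed on anonymous inner classes (that restriction was only relaxed in Java 9), so this is as far as the mechanical rewrite can go. A small sketch of the rule, again illustrative rather than HBase code:

import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListSet;

public class AnonymousDiamond {
  public static void main(String[] args) {
    ConcurrentSkipListSet<String> set =
        // Diamond is fine on the set; writing Comparator<>() below instead
        // would be a compile error before Java 9.
        new ConcurrentSkipListSet<>(new Comparator<String>() {
          @Override
          public int compare(String a, String b) {
            return a.compareTo(b);
          }
        });
    set.add("b");
    set.add("a");
    System.out.println(set.first()); // prints: a
  }
}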
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java
index 9a72c4e..a3003c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
public final class UniqueIndexMap<T> implements Serializable {
private static final long serialVersionUID = -1145635738654002342L;
- ConcurrentHashMap<T, Integer> mForwardMap = new ConcurrentHashMap<T, Integer>();
- ConcurrentHashMap<Integer, T> mReverseMap = new ConcurrentHashMap<Integer, T>();
+ ConcurrentHashMap<T, Integer> mForwardMap = new ConcurrentHashMap<>();
+ ConcurrentHashMap<Integer, T> mReverseMap = new ConcurrentHashMap<>();
AtomicInteger mIndex = new AtomicInteger(0);
// Map a length to an index. If we can't, allocate a new mapping. We might
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 100f751..cf99f8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -169,7 +169,7 @@ public class MemorySizeUtil {
if (MemStoreLAB.isEnabled(conf)) {
// We are in offheap Memstore use
long globalMemStoreLimit = (long) (offheapMSGlobal * 1024 * 1024); // Size in bytes
- return new Pair<Long, MemoryType>(globalMemStoreLimit, MemoryType.NON_HEAP);
+ return new Pair<>(globalMemStoreLimit, MemoryType.NON_HEAP);
} else {
// Off heap max memstore size is configured with MSLAB turned off. It makes no sense. Do a
// warn log and go with on heap memstore percentage. By default it will be 40% of Xmx
@@ -178,7 +178,7 @@ public class MemorySizeUtil {
+ " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')");
}
}
- return new Pair<Long, MemoryType>(getOnheapGlobalMemstoreSize(conf), MemoryType.HEAP);
+ return new Pair<>(getOnheapGlobalMemstoreSize(conf), MemoryType.HEAP);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index a9b6fd1..4ebfcd9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -60,7 +60,7 @@ public class FifoRpcScheduler extends RpcScheduler {
handlerCount,
60,
TimeUnit.SECONDS,
- new ArrayBlockingQueue<Runnable>(maxQueueLength),
+ new ArrayBlockingQueue<>(maxQueueLength),
new DaemonThreadFactory("FifoRpcScheduler.handler"),
new ThreadPoolExecutor.CallerRunsPolicy());
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 7813bf4..4b0c974 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -145,12 +145,10 @@ public abstract class RpcServer implements RpcServerInterface,
/** This is set to Call object before Handler invokes an RPC and reset
* after the call returns.
*/
- protected static final ThreadLocal<RpcCall> CurCall =
- new ThreadLocal<RpcCall>();
+ protected static final ThreadLocal<RpcCall> CurCall = new ThreadLocal<>();
/** Keeps MonitoredRPCHandler per handler thread. */
- protected static final ThreadLocal<MonitoredRPCHandler> MONITORED_RPC
- = new ThreadLocal<MonitoredRPCHandler>();
+ protected static final ThreadLocal<MonitoredRPCHandler> MONITORED_RPC = new ThreadLocal<>();
protected final InetSocketAddress bindAddress;
@@ -413,7 +411,7 @@ public abstract class RpcServer implements RpcServerInterface,
this.connection.compressionCodec, cells);
if (b != null) {
cellBlockSize = b.remaining();
- cellBlock = new ArrayList<ByteBuffer>(1);
+ cellBlock = new ArrayList<>(1);
cellBlock.add(b);
}
}
@@ -1177,7 +1175,7 @@ public abstract class RpcServer implements RpcServerInterface,
status.getClient(), startTime, processingTime, qTime,
responseSize);
}
- return new Pair<Message, CellScanner>(result, controller.cellScanner());
+ return new Pair<>(result, controller.cellScanner());
} catch (Throwable e) {
// The above callBlockingMethod will always return a SE. Strip the SE wrapper before
// putting it on the wire. It's needed to adhere to the pb Service Interface but we don't
@@ -1218,7 +1216,7 @@ public abstract class RpcServer implements RpcServerInterface,
String clientAddress, long startTime, int processingTime, int qTime,
long responseSize) throws IOException {
// base information that is reported regardless of type of call
- Map<String, Object> responseInfo = new HashMap<String, Object>();
+ Map<String, Object> responseInfo = new HashMap<>();
responseInfo.put("starttimems", startTime);
responseInfo.put("processingtimems", processingTime);
responseInfo.put("queuetimems", qTime);
@@ -1299,7 +1297,7 @@ public abstract class RpcServer implements RpcServerInterface,
static Pair<ByteBuff, CallCleanup> allocateByteBuffToReadInto(ByteBufferPool pool,
int minSizeForPoolUse, int reqLen) {
ByteBuff resultBuf;
- List<ByteBuffer> bbs = new ArrayList<ByteBuffer>((reqLen / pool.getBufferSize()) + 1);
+ List<ByteBuffer> bbs = new ArrayList<>((reqLen / pool.getBufferSize()) + 1);
int remain = reqLen;
ByteBuffer buf = null;
while (remain >= minSizeForPoolUse && (buf = pool.getBuffer()) != null) {
@@ -1325,14 +1323,14 @@ public abstract class RpcServer implements RpcServerInterface,
resultBuf.limit(reqLen);
if (bufsFromPool != null) {
final ByteBuffer[] bufsFromPoolFinal = bufsFromPool;
- return new Pair<ByteBuff, RpcServer.CallCleanup>(resultBuf, () -> {
+ return new Pair<>(resultBuf, () -> {
// Return back all the BBs to pool
for (int i = 0; i < bufsFromPoolFinal.length; i++) {
pool.putbackBuffer(bufsFromPoolFinal[i]);
}
});
}
- return new Pair<ByteBuff, RpcServer.CallCleanup>(resultBuf, null);
+ return new Pair<>(resultBuf, null);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 075d8b8..9e1e81e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -259,8 +259,7 @@ public class SimpleRpcServer extends RpcServer {
private final Selector readSelector;
Reader() throws IOException {
- this.pendingConnections =
- new LinkedBlockingQueue<Connection>(readerPendingConnectionQueueLength);
+ this.pendingConnections = new LinkedBlockingQueue<>(readerPendingConnectionQueueLength);
this.readSelector = Selector.open();
}
@@ -603,7 +602,7 @@ public class SimpleRpcServer extends RpcServer {
return lastPurgeTime;
}
- ArrayList<Connection> conWithOldCalls = new ArrayList<Connection>();
+ ArrayList<Connection> conWithOldCalls = new ArrayList<>();
// get the list of channels from list of keys.
synchronized (writeSelector.keys()) {
for (SelectionKey key : writeSelector.keys()) {
@@ -763,7 +762,7 @@ public class SimpleRpcServer extends RpcServer {
protected SocketChannel channel;
private ByteBuff data;
private ByteBuffer dataLengthBuffer;
- protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<Call>();
+ protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<>();
private final Lock responseWriteLock = new ReentrantLock();
private LongAdder rpcCount = new LongAdder(); // number of outstanding rpcs
private long lastContact;
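
Note that the diamond is not a raw type: new ConcurrentLinkedDeque<>() above is still checked in full against the declared field type, whereas dropping the angle brackets entirely would only produce an unchecked warning and defer failures to runtime. A small illustrative sketch (not HBase code):

import java.util.concurrent.ConcurrentLinkedDeque;

class DiamondVsRaw {
  // Diamond: inferred as ConcurrentLinkedDeque<String>, fully type checked.
  final ConcurrentLinkedDeque<String> checked = new ConcurrentLinkedDeque<>();

  // Raw type: compiles only with an unchecked-conversion warning, and a raw
  // reference would accept elements of any type.
  @SuppressWarnings({"rawtypes", "unchecked"})
  final ConcurrentLinkedDeque<String> raw = new ConcurrentLinkedDeque();
}
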
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index ee6da75..e1ca999 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -116,7 +116,7 @@ implements TableMap<ImmutableBytesWritable,Result> {
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
- ArrayList<byte[]> foundList = new ArrayList<byte[]>();
+ ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value: r.listCells()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 819ef57..8f0504a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -120,7 +120,7 @@ public class CopyTable extends Configured implements Tool {
if(families != null) {
String[] fams = families.split(",");
- Map<String,String> cfRenameMap = new HashMap<String,String>();
+ Map<String,String> cfRenameMap = new HashMap<>();
for(String fam : fams) {
String sourceCf;
if(fam.contains(":")) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
index 10e34d2..004ee5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
@@ -53,7 +53,7 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression
private static final Log LOG = LogFactory.getLog(DefaultVisibilityExpressionResolver.class);
private Configuration conf;
- private final Map<String, Integer> labels = new HashMap<String, Integer>();
+ private final Map<String, Integer> labels = new HashMap<>();
@Override
public Configuration getConf() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
index 8a9fa49..44e43c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
@@ -106,7 +106,7 @@ extends TableMapper<ImmutableBytesWritable,Result> implements Configurable {
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
- ArrayList<byte[]> foundList = new ArrayList<byte[]>();
+ ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value: r.listCells()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 13ea5c5..1ce5f60 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -137,7 +137,7 @@ public class HFileOutputFormat2
static <V extends Cell> RecordWriter<ImmutableBytesWritable, V>
createRecordWriter(final TaskAttemptContext context) throws IOException {
- return new HFileRecordWriter<V>(context, null);
+ return new HFileRecordWriter<>(context, null);
}
protected static class HFileRecordWriter<V extends Cell>
@@ -211,7 +211,7 @@ public class HFileOutputFormat2
overriddenEncoding = null;
}
- writers = new TreeMap<byte[], WriterLength>(Bytes.BYTES_COMPARATOR);
+ writers = new TreeMap<>(Bytes.BYTES_COMPARATOR);
previousRow = HConstants.EMPTY_BYTE_ARRAY;
now = Bytes.toBytes(EnvironmentEdgeManager.currentTime());
rollRequested = false;
@@ -418,8 +418,7 @@ public class HFileOutputFormat2
private static List<ImmutableBytesWritable> getRegionStartKeys(RegionLocator table)
throws IOException {
byte[][] byteKeys = table.getStartKeys();
- ArrayList<ImmutableBytesWritable> ret =
- new ArrayList<ImmutableBytesWritable>(byteKeys.length);
+ ArrayList<ImmutableBytesWritable> ret = new ArrayList<>(byteKeys.length);
for (byte[] byteKey : byteKeys) {
ret.add(new ImmutableBytesWritable(byteKey));
}
@@ -442,8 +441,7 @@ public class HFileOutputFormat2
// have keys < the first region (which has an empty start key)
// so we need to remove it. Otherwise we would end up with an
// empty reducer with index 0
- TreeSet<ImmutableBytesWritable> sorted =
- new TreeSet<ImmutableBytesWritable>(startKeys);
+ TreeSet<ImmutableBytesWritable> sorted = new TreeSet<>(startKeys);
ImmutableBytesWritable first = sorted.first();
if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
@@ -587,8 +585,7 @@ public class HFileOutputFormat2
conf) {
Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
COMPRESSION_FAMILIES_CONF_KEY);
- Map<byte[], Algorithm> compressionMap = new TreeMap<byte[],
- Algorithm>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], Algorithm> compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue());
compressionMap.put(e.getKey(), algorithm);
@@ -607,8 +604,7 @@ public class HFileOutputFormat2
static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
BLOOM_TYPE_FAMILIES_CONF_KEY);
- Map<byte[], BloomType> bloomTypeMap = new TreeMap<byte[],
- BloomType>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], BloomType> bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
BloomType bloomType = BloomType.valueOf(e.getValue());
bloomTypeMap.put(e.getKey(), bloomType);
@@ -627,8 +623,7 @@ public class HFileOutputFormat2
static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
BLOCK_SIZE_FAMILIES_CONF_KEY);
- Map<byte[], Integer> blockSizeMap = new TreeMap<byte[],
- Integer>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], Integer> blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
Integer blockSize = Integer.parseInt(e.getValue());
blockSizeMap.put(e.getKey(), blockSize);
@@ -649,8 +644,7 @@ public class HFileOutputFormat2
Configuration conf) {
Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
DATABLOCK_ENCODING_FAMILIES_CONF_KEY);
- Map<byte[], DataBlockEncoding> encoderMap = new TreeMap<byte[],
- DataBlockEncoding>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], DataBlockEncoding> encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue())));
}
@@ -667,7 +661,7 @@ public class HFileOutputFormat2
*/
private static Map<byte[], String> createFamilyConfValueMap(
Configuration conf, String confName) {
- Map<byte[], String> confValMap = new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], String> confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
String confVal = conf.get(confName, "");
for (String familyConf : confVal.split("&")) {
String[] familySplit = familyConf.split("=");
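
All the TreeMap hunks in this file follow one pattern: the comparator passed to the constructor is an ordinary value argument and contributes nothing to inference, so the key and value types come entirely from the declared target type. A minimal sketch with illustrative names (the toy length-based comparator merely stands in for Bytes.BYTES_COMPARATOR):

import java.util.Comparator;
import java.util.TreeMap;

class DiamondWithCtorArgs {
  // Toy ordering by array length; stand-in for Bytes.BYTES_COMPARATOR.
  static final Comparator<byte[]> BYTES_ORDER =
      (a, b) -> Integer.compare(a.length, b.length);

  // Inferred as TreeMap<byte[], String> purely from the left-hand side;
  // the comparator argument plays no part in the inference.
  final TreeMap<byte[], String> byFamily = new TreeMap<>(BYTES_ORDER);
}
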
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index 674cb57..2834f86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -213,7 +213,7 @@ public class HashTable extends Configured implements Tool {
* into the desired number of partitions.
*/
void selectPartitions(Pair<byte[][], byte[][]> regionStartEndKeys) {
- List<byte[]> startKeys = new ArrayList<byte[]>();
+ List<byte[]> startKeys = new ArrayList<>();
for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
@@ -244,7 +244,7 @@ public class HashTable extends Configured implements Tool {
}
// choose a subset of start keys to group regions into ranges
- partitions = new ArrayList<ImmutableBytesWritable>(numHashFiles - 1);
+ partitions = new ArrayList<>(numHashFiles - 1);
// skip the first start key as it is not a partition between ranges.
for (long i = 1; i < numHashFiles; i++) {
int splitIndex = (int) (numRegions * i / numHashFiles);
@@ -269,7 +269,7 @@ public class HashTable extends Configured implements Tool {
@SuppressWarnings("deprecation")
SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
ImmutableBytesWritable key = new ImmutableBytesWritable();
- partitions = new ArrayList<ImmutableBytesWritable>();
+ partitions = new ArrayList<>();
while (reader.next(key)) {
partitions.add(new ImmutableBytesWritable(key.copyBytes()));
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index e2693b9..d1beb8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -471,7 +471,7 @@ public class Import extends Configured implements Tool {
}
private static ArrayList<byte[]> toQuotedByteArrays(String... stringArgs) {
- ArrayList<byte[]> quotedArgs = new ArrayList<byte[]>();
+ ArrayList<byte[]> quotedArgs = new ArrayList<>();
for (String stringArg : stringArgs) {
// all the filters' instantiation methods expected quoted args since they are coming from
// the shell, so add them here, though it shouldn't really be needed :-/
@@ -536,7 +536,7 @@ public class Import extends Configured implements Tool {
String[] allMappings = allMappingsPropVal.split(",");
for (String mapping: allMappings) {
if(cfRenameMap == null) {
- cfRenameMap = new TreeMap<byte[],byte[]>(Bytes.BYTES_COMPARATOR);
+ cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
String [] srcAndDest = mapping.split(":");
if(srcAndDest.length != 2) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 39085df..a379d53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -249,7 +249,7 @@ public class ImportTsv extends Configured implements Tool {
public ParsedLine parse(byte[] lineBytes, int length)
throws BadTsvLineException {
// Enumerate separator offsets
- ArrayList<Integer> tabOffsets = new ArrayList<Integer>(maxColumnCount);
+ ArrayList<Integer> tabOffsets = new ArrayList<>(maxColumnCount);
for (int i = 0; i < length; i++) {
if (lineBytes[i] == separatorByte) {
tabOffsets.add(i);
@@ -448,7 +448,7 @@ public class ImportTsv extends Configured implements Tool {
+ " are less than row key position.");
}
}
- return new Pair<Integer, Integer>(startPos, endPos - startPos + 1);
+ return new Pair<>(startPos, endPos - startPos + 1);
}
}
@@ -521,7 +521,7 @@ public class ImportTsv extends Configured implements Tool {
boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false);
// if no.strict is false then check column family
if(!noStrict) {
- ArrayList<String> unmatchedFamilies = new ArrayList<String>();
+ ArrayList<String> unmatchedFamilies = new ArrayList<>();
Set<String> cfSet = getColumnFamilies(columns);
HTableDescriptor tDesc = table.getTableDescriptor();
for (String cf : cfSet) {
@@ -530,7 +530,7 @@ public class ImportTsv extends Configured implements Tool {
}
}
if(unmatchedFamilies.size() > 0) {
- ArrayList<String> familyNames = new ArrayList<String>();
+ ArrayList<String> familyNames = new ArrayList<>();
for (HColumnDescriptor family : table.getTableDescriptor().getFamilies()) {
familyNames.add(family.getNameAsString());
}
@@ -626,7 +626,7 @@ public class ImportTsv extends Configured implements Tool {
}
private static Set<String> getColumnFamilies(String[] columns) {
- Set<String> cfSet = new HashSet<String>();
+ Set<String> cfSet = new HashSet<>();
for (String aColumn : columns) {
if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn)
|| TsvParser.TIMESTAMPKEY_COLUMN_SPEC.equals(aColumn)
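
The return new Pair<>(startPos, endPos - startPos + 1) hunk above also shows inference working through autoboxing: the operands are primitive ints, and the compiler boxes them to satisfy the Pair<Integer, Integer> return type. A self-contained sketch of the same shape (the local Pair stands in for HBase's):

class DiamondWithBoxing {
  static final class Pair<A, B> {
    final A first; final B second;
    Pair(A first, B second) { this.first = first; this.second = second; }
  }

  static Pair<Integer, Integer> span(int startPos, int endPos) {
    // Both arguments are boxed from int to Integer during inference.
    return new Pair<>(startPos, endPos - startPos + 1);
  }
}
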
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
index f6c7a90..d37ab94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
@@ -40,7 +40,7 @@ public class KeyValueSortReducer extends Reducer<ImmutableBytesWritable, KeyValu
protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<KeyValue> kvs,
org.apache.hadoop.mapreduce.Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue>.Context context)
throws java.io.IOException, InterruptedException {
- TreeSet<KeyValue> map = new TreeSet<KeyValue>(CellComparator.COMPARATOR);
+ TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
for (KeyValue kv: kvs) {
try {
map.add(kv.clone());
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 963c4a1..718e88b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -125,7 +125,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
private int maxFilesPerRegionPerFamily;
private boolean assignSeqIds;
- private Set<String> unmatchedFamilies = new HashSet<String>();
+ private Set<String> unmatchedFamilies = new HashSet<>();
// Source filesystem
private FileSystem fs;
@@ -630,7 +630,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
builder.setNameFormat("LoadIncrementalHFiles-%1$d");
ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
- new LinkedBlockingQueue<Runnable>(), builder.build());
+ new LinkedBlockingQueue<>(), builder.build());
((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
return pool;
}
@@ -889,7 +889,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
// Add these back at the *front* of the queue, so there's a lower
// chance that the region will just split again before we get there.
- List<LoadQueueItem> lqis = new ArrayList<LoadQueueItem>(2);
+ List<LoadQueueItem> lqis = new ArrayList<>(2);
lqis.add(new LoadQueueItem(item.family, botOut));
lqis.add(new LoadQueueItem(item.family, topOut));
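
The LinkedBlockingQueue hunk is the one place in this file where the diamond sits in argument position rather than in an assignment or return: under Java 8 target typing it resolves against the constructor parameter's declared type, BlockingQueue<Runnable>, so the queue is inferred as LinkedBlockingQueue<Runnable>. A minimal sketch of the same shape (method name is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class DiamondAsArgument {
  static ExecutorService newPool(int threads) {
    // The diamond resolves against the workQueue parameter, declared as
    // BlockingQueue<Runnable>, yielding LinkedBlockingQueue<Runnable>.
    return new ThreadPoolExecutor(threads, threads, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>());
  }
}
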
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
index 7c1ebbc..dc2fc0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
@@ -65,8 +65,7 @@ public class MultiHFileOutputFormat extends FileOutputFormat<ImmutableBytesWrita
final FileSystem fs = outputDir.getFileSystem(conf);
// Map of tables to writers
- final Map<ImmutableBytesWritable, RecordWriter<ImmutableBytesWritable, V>> tableWriters =
- new HashMap<ImmutableBytesWritable, RecordWriter<ImmutableBytesWritable, V>>();
+ final Map<ImmutableBytesWritable, RecordWriter<ImmutableBytesWritable, V>> tableWriters = new HashMap<>();
return new RecordWriter<ImmutableBytesWritable, V>() {
@Override
@@ -82,7 +81,7 @@ public class MultiHFileOutputFormat extends FileOutputFormat<ImmutableBytesWrita
+ tableOutputDir.toString());
// Create writer for one specific table
- tableWriter = new HFileOutputFormat2.HFileRecordWriter<V>(context, tableOutputDir);
+ tableWriter = new HFileOutputFormat2.HFileRecordWriter<>(context, tableOutputDir);
// Put table into map
tableWriters.put(tableName, tableWriter);
}
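
Notice what this hunk does not convert: the anonymous new RecordWriter<ImmutableBytesWritable, V>() keeps its explicit type arguments, because using the diamond with an anonymous class is a compile error before Java 9 (the restriction was relaxed by JEP 213). A short illustrative sketch:

import java.util.Comparator;

class DiamondAnonymous {
  // Legal on Java 8: explicit type argument on an anonymous class.
  Comparator<String> explicit = new Comparator<String>() {
    @Override public int compare(String a, String b) { return a.compareTo(b); }
  };

  // Only legal from Java 9 onward (JEP 213):
  // Comparator<String> inferred = new Comparator<>() { /* same body */ };
}
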
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
index 48a982b..3099c0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
@@ -92,7 +92,7 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements
throw new IllegalArgumentException("There must be at least 1 scan configuration set to : "
+ SCANS);
}
- List<Scan> scans = new ArrayList<Scan>();
+ List<Scan> scans = new ArrayList<>();
for (int i = 0; i < rawScans.length; i++) {
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index 4931c3f..25ea047 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -163,7 +163,7 @@ public abstract class MultiTableInputFormatBase extends
throw new IOException("No scans were provided.");
}
- Map<TableName, List<Scan>> tableMaps = new HashMap<TableName, List<Scan>>();
+ Map<TableName, List<Scan>> tableMaps = new HashMap<>();
for (Scan scan : scans) {
byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME);
if (tableNameBytes == null)
@@ -173,13 +173,13 @@ public abstract class MultiTableInputFormatBase extends
List<Scan> scanList = tableMaps.get(tableName);
if (scanList == null) {
- scanList = new ArrayList<Scan>();
+ scanList = new ArrayList<>();
tableMaps.put(tableName, scanList);
}
scanList.add(scan);
}
- List<InputSplit> splits = new ArrayList<InputSplit>();
+ List<InputSplit> splits = new ArrayList<>();
Iterator iter = tableMaps.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<TableName, List<Scan>> entry = (Map.Entry<TableName, List<Scan>>) iter.next();
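
One hunk above still carries a raw Iterator and an unchecked cast to Map.Entry<TableName, List<Scan>>; the commit limits itself to diamond conversions, but a typed enhanced-for loop would remove the cast entirely. A hedged sketch of that alternative shape, with String and Integer standing in for TableName and Scan:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class TypedEntryIteration {
  static List<String> describe(Map<String, List<Integer>> tableMaps) {
    List<String> out = new ArrayList<>();
    // The typed entry in the for-each header replaces both the raw Iterator
    // and the explicit cast.
    for (Map.Entry<String, List<Integer>> entry : tableMaps.entrySet()) {
      out.add(entry.getKey() + " -> " + entry.getValue().size() + " scans");
    }
    return out;
  }
}
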
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index 6657c99..b48580d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -79,10 +79,10 @@ public class PutSortReducer extends
"putsortreducer.row.threshold", 1L * (1<<30));
Iterator<Put> iter = puts.iterator();
while (iter.hasNext()) {
- TreeSet<KeyValue> map = new TreeSet<KeyValue>(CellComparator.COMPARATOR);
+ TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
long curSize = 0;
// stop at the end or the RAM threshold
- List<Tag> tags = new ArrayList<Tag>();
+ List<Tag> tags = new ArrayList<>();
while (iter.hasNext() && curSize < threshold) {
// clear the tags
tags.clear();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
index 1561b3b..98c92ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
@@ -92,7 +92,7 @@ public class ResultSerialization extends Configured implements Serialization<Res
}
byte[] buf = new byte[totalBuffer];
readChunked(in, buf, 0, totalBuffer);
- List<Cell> kvs = new ArrayList<Cell>();
+ List<Cell> kvs = new ArrayList<>();
int offset = 0;
while (offset < totalBuffer) {
int keyLength = Bytes.toInt(buf, offset);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 9ebb3c1..7962a42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -137,8 +137,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
/** The reverse DNS lookup cache mapping: IPAddress => HostName */
- private HashMap<InetAddress, String> reverseDNSCacheMap =
- new HashMap<InetAddress, String>();
+ private HashMap<InetAddress, String> reverseDNSCacheMap = new HashMap<>();
/**
* Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
@@ -262,7 +261,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
if (null == regLoc) {
throw new IOException("Expecting at least one region.");
}
- List<InputSplit> splits = new ArrayList<InputSplit>(1);
+ List<InputSplit> splits = new ArrayList<>(1);
long regionSize = sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
TableSplit split = new TableSplit(tableName, scan,
HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
@@ -270,7 +269,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
splits.add(split);
return splits;
}
- List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length);
+ List<InputSplit> splits = new ArrayList<>(keys.getFirst().length);
for (int i = 0; i < keys.getFirst().length; i++) {
if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
continue;
@@ -373,7 +372,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
*/
private List<InputSplit> calculateRebalancedSplits(List<InputSplit> list, JobContext context,
long average) throws IOException {
- List<InputSplit> resultList = new ArrayList<InputSplit>();
+ List<InputSplit> resultList = new ArrayList<>();
Configuration conf = context.getConfiguration();
//The default data skew ratio is 3
long dataSkewRatio = conf.getLong(INPUT_AUTOBALANCE_MAXSKEWRATIO, 3);