Posted to commits@hbase.apache.org by st...@apache.org on 2017/03/07 19:23:28 UTC

[15/22] hbase git commit: HBASE-17532 Replaced explicit type with diamond operator
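
The change is mechanical throughout the patch: wherever the type arguments of a "new" expression can be inferred from the target type, the explicit arguments are replaced with the diamond operator (<>) introduced in Java 7. A minimal standalone illustration of the pattern (not taken from the patch):

    import java.util.ArrayList;
    import java.util.List;

    public class DiamondExample {
      public static void main(String[] args) {
        // Before: the element type is repeated on both sides.
        List<String> before = new ArrayList<String>();
        // After: the compiler infers <String> from the declared type.
        List<String> after = new ArrayList<>();
        before.add("x");
        after.add("x");
        System.out.println(before.equals(after)); // prints: true
      }
    }

Both forms compile to the same bytecode; the diamond only removes source-level redundancy.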

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 98f39da..69b486d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -451,7 +451,7 @@ public class TableMapReduceUtil {
     job.setMapperClass(mapper);
     Configuration conf = job.getConfiguration();
     HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
-    List<String> scanStrings = new ArrayList<String>();
+    List<String> scanStrings = new ArrayList<>();
 
     for (Scan scan : scans) {
       scanStrings.add(convertScanToString(scan));
@@ -807,7 +807,7 @@ public class TableMapReduceUtil {
     if (conf == null) {
       throw new IllegalArgumentException("Must provide a configuration object.");
     }
-    Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
+    Set<String> paths = new HashSet<>(conf.getStringCollection("tmpjars"));
     if (paths.isEmpty()) {
       throw new IllegalArgumentException("Configuration contains no tmpjars.");
     }
@@ -879,13 +879,13 @@ public class TableMapReduceUtil {
       Class<?>... classes) throws IOException {
 
     FileSystem localFs = FileSystem.getLocal(conf);
-    Set<String> jars = new HashSet<String>();
+    Set<String> jars = new HashSet<>();
     // Add jars that are already in the tmpjars variable
     jars.addAll(conf.getStringCollection("tmpjars"));
 
     // add jars as we find them to a map of contents jar name so that we can avoid
     // creating new jars for classes that have already been packaged.
-    Map<String, String> packagedClasses = new HashMap<String, String>();
+    Map<String, String> packagedClasses = new HashMap<>();
 
     // Add jars containing the specified classes
     for (Class<?> clazz : classes) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index c40396f..b2db319 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -183,7 +183,7 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
 
   @Override
   public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
-    List<InputSplit> results = new ArrayList<InputSplit>();
+    List<InputSplit> results = new ArrayList<>();
     for (TableSnapshotInputFormatImpl.InputSplit split :
         TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) {
       results.add(new TableSnapshotRegionSplit(split));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index d52703a..69beef8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -311,7 +311,7 @@ public class TableSnapshotInputFormatImpl {
 
     Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
 
-    List<InputSplit> splits = new ArrayList<InputSplit>();
+    List<InputSplit> splits = new ArrayList<>();
     for (HRegionInfo hri : regionManifests) {
       // load region descriptor
 
@@ -346,7 +346,7 @@ public class TableSnapshotInputFormatImpl {
    */
   public static List<String> getBestLocations(
       Configuration conf, HDFSBlocksDistribution blockDistribution) {
-    List<String> locations = new ArrayList<String>(3);
+    List<String> locations = new ArrayList<>(3);
 
     HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 1e09f03..05a4820 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -146,7 +146,7 @@ public class TextSortReducer extends
         "reducer.row.threshold", 1L * (1<<30));
     Iterator<Text> iter = lines.iterator();
     while (iter.hasNext()) {
-      Set<KeyValue> kvs = new TreeSet<KeyValue>(CellComparator.COMPARATOR);
+      Set<KeyValue> kvs = new TreeSet<>(CellComparator.COMPARATOR);
       long curSize = 0;
       // stop at the end or the RAM threshold
       while (iter.hasNext() && curSize < threshold) {
@@ -160,7 +160,7 @@ public class TextSortReducer extends
           ttl = parsed.getCellTTL();
           
           // create tags for the parsed line
-          List<Tag> tags = new ArrayList<Tag>();
+          List<Tag> tags = new ArrayList<>();
           if (cellVisibilityExpr != null) {
             tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags(
               cellVisibilityExpr));
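
A note on hunks like the TreeSet one above: the element type is inferred from the declaration's target type, not from the constructor argument, so passing a comparator does not pin down the type parameter. A reduced sketch of the same shape, using only the standard library:

    import java.util.Comparator;
    import java.util.Set;
    import java.util.TreeSet;

    // <String> is inferred from the declared type on the left; the
    // comparator argument only has to be a Comparator<? super String>.
    Set<String> names = new TreeSet<>(Comparator.reverseOrder());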

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index 94bcb43..08b5aab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -113,7 +113,7 @@ extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
       throw new RuntimeException("No row key column specified");
     }
     this.kvCreator = new CellCreator(conf);
-    tags = new ArrayList<Tag>();
+    tags = new ArrayList<>();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
index 02fcbba..8514ace 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
@@ -239,7 +239,7 @@ public class WALInputFormat extends InputFormat<WALKey, WALEdit> {
     FileSystem fs = inputDir.getFileSystem(conf);
     List<FileStatus> files = getFiles(fs, inputDir, startTime, endTime);
 
-    List<InputSplit> splits = new ArrayList<InputSplit>(files.size());
+    List<InputSplit> splits = new ArrayList<>(files.size());
     for (FileStatus file : files) {
       splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
     }
@@ -248,7 +248,7 @@ public class WALInputFormat extends InputFormat<WALKey, WALEdit> {
 
   private List<FileStatus> getFiles(FileSystem fs, Path dir, long startTime, long endTime)
       throws IOException {
-    List<FileStatus> result = new ArrayList<FileStatus>();
+    List<FileStatus> result = new ArrayList<>();
     LOG.debug("Scanning " + dir.toString() + " for WAL files");
 
     FileStatus[] files = fs.listStatus(dir);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 2c67baf..cca2041 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -132,7 +132,7 @@ public class WALPlayer extends Configured implements Tool {
    */
   protected static class WALMapper
   extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
-    private Map<TableName, TableName> tables = new TreeMap<TableName, TableName>();
+    private Map<TableName, TableName> tables = new TreeMap<>();
 
     @Override
     public void map(WALKey key, WALEdit value, Context context)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 60ad545..69ebd97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -115,7 +115,7 @@ public class AssignmentManager {
 
   private AtomicInteger numRegionsOpened = new AtomicInteger(0);
 
-  final private KeyLocker<String> locker = new KeyLocker<String>();
+  final private KeyLocker<String> locker = new KeyLocker<>();
 
   Set<HRegionInfo> replicasToClose = Collections.synchronizedSet(new HashSet<HRegionInfo>());
 
@@ -141,8 +141,7 @@ public class AssignmentManager {
   // TODO: When do plans get cleaned out?  Ever? In server open and in server
   // shutdown processing -- St.Ack
   // All access to this Map must be synchronized.
-  final NavigableMap<String, RegionPlan> regionPlans =
-    new TreeMap<String, RegionPlan>();
+  final NavigableMap<String, RegionPlan> regionPlans = new TreeMap<>();
 
   private final TableStateManager tableStateManager;
 
@@ -183,8 +182,7 @@ public class AssignmentManager {
    * because we don't expect this to happen frequently; we don't
    * want to copy this information over during each state transition either.
    */
-  private final ConcurrentHashMap<String, AtomicInteger>
-    failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();
+  private final ConcurrentHashMap<String, AtomicInteger> failedOpenTracker = new ConcurrentHashMap<>();
 
   // In case not using ZK for region assignment, region states
   // are persisted in meta with a state store
@@ -197,7 +195,7 @@ public class AssignmentManager {
   public static boolean TEST_SKIP_SPLIT_HANDLING = false;
 
   /** Listeners that are called on assignment events. */
-  private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
+  private List<AssignmentListener> listeners = new CopyOnWriteArrayList<>();
 
   private RegionStateListener regionStateListener;
 
@@ -382,7 +380,7 @@ public class AssignmentManager {
         pending++;
       }
     }
-    return new Pair<Integer, Integer>(pending, hris.size());
+    return new Pair<>(pending, hris.size());
   }
 
   /**
@@ -748,16 +746,16 @@ public class AssignmentManager {
         return true;
       }
       LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString());
-      Set<String> encodedNames = new HashSet<String>(regionCount);
+      Set<String> encodedNames = new HashSet<>(regionCount);
       for (HRegionInfo region : regions) {
         encodedNames.add(region.getEncodedName());
       }
 
-      List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
+      List<HRegionInfo> failedToOpenRegions = new ArrayList<>();
       Map<String, Lock> locks = locker.acquireLocks(encodedNames);
       try {
-        Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regionCount);
-        List<RegionState> states = new ArrayList<RegionState>(regionCount);
+        Map<String, RegionPlan> plans = new HashMap<>(regionCount);
+        List<RegionState> states = new ArrayList<>(regionCount);
         for (HRegionInfo region : regions) {
           String encodedName = region.getEncodedName();
           if (!isDisabledorDisablingRegionInRIT(region)) {
@@ -797,8 +795,7 @@ public class AssignmentManager {
         // that unnecessary timeout on RIT is reduced.
         this.addPlans(plans);
 
-        List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos =
-          new ArrayList<Pair<HRegionInfo, List<ServerName>>>(states.size());
+        List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos = new ArrayList<>(states.size());
         for (RegionState state: states) {
           HRegionInfo region = state.getRegion();
           regionStates.updateRegionState(
@@ -807,8 +804,7 @@ public class AssignmentManager {
           if (shouldAssignFavoredNodes(region)) {
             favoredNodes = server.getFavoredNodesManager().getFavoredNodesWithDNPort(region);
           }
-          regionOpenInfos.add(new Pair<HRegionInfo, List<ServerName>>(
-            region, favoredNodes));
+          regionOpenInfos.add(new Pair<>(region, favoredNodes));
         }
 
         // Move on to open regions.
@@ -908,7 +904,7 @@ public class AssignmentManager {
       }
 
       // wait for assignment completion
-      ArrayList<HRegionInfo> userRegionSet = new ArrayList<HRegionInfo>(regions.size());
+      ArrayList<HRegionInfo> userRegionSet = new ArrayList<>(regions.size());
       for (HRegionInfo region: regions) {
         if (!region.getTable().isSystemTable()) {
           userRegionSet.add(region);
@@ -1443,7 +1439,7 @@ public class AssignmentManager {
    */
   public boolean waitForAssignment(HRegionInfo regionInfo)
       throws InterruptedException {
-    ArrayList<HRegionInfo> regionSet = new ArrayList<HRegionInfo>(1);
+    ArrayList<HRegionInfo> regionSet = new ArrayList<>(1);
     regionSet.add(regionInfo);
     return waitForAssignment(regionSet, true, Long.MAX_VALUE);
   }
@@ -1588,7 +1584,7 @@ public class AssignmentManager {
       }
 
       // invoke assignment (async)
-      ArrayList<HRegionInfo> userRegionSet = new ArrayList<HRegionInfo>(regions);
+      ArrayList<HRegionInfo> userRegionSet = new ArrayList<>(regions);
       for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
         if (!assign(plan.getKey(), plan.getValue()) && !server.isStopped()) {
           for (HRegionInfo region: plan.getValue()) {
@@ -1640,7 +1636,7 @@ public class AssignmentManager {
     if (retainAssignment) {
       assign(allRegions);
     } else {
-      List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionsFromMetaScan);
+      List<HRegionInfo> regions = new ArrayList<>(regionsFromMetaScan);
       assign(regions);
     }
 
@@ -1687,7 +1683,7 @@ public class AssignmentManager {
    */
   public static List<HRegionInfo> replicaRegionsNotRecordedInMeta(
       Set<HRegionInfo> regionsRecordedInMeta, MasterServices master)throws IOException {
-    List<HRegionInfo> regionsNotRecordedInMeta = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regionsNotRecordedInMeta = new ArrayList<>();
     for (HRegionInfo hri : regionsRecordedInMeta) {
       TableName table = hri.getTable();
       if(master.getTableDescriptors().get(table) == null)
@@ -1723,7 +1719,7 @@ public class AssignmentManager {
     // Get any new but slow to checkin region server that joined the cluster
     Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
     // Set of offline servers to be returned
-    Set<ServerName> offlineServers = new HashSet<ServerName>();
+    Set<ServerName> offlineServers = new HashSet<>();
     // Iterate regions in META
     for (Result result : results) {
       if (result == null && LOG.isDebugEnabled()){
@@ -2446,7 +2442,7 @@ public class AssignmentManager {
     threadPoolExecutorService.submit(splitReplicasCallable);
 
     // wait for assignment completion
-    ArrayList<HRegionInfo> regionAssignSet = new ArrayList<HRegionInfo>(2);
+    ArrayList<HRegionInfo> regionAssignSet = new ArrayList<>(2);
     regionAssignSet.add(daughterAHRI);
     regionAssignSet.add(daughterBHRI);
     while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(),
@@ -2558,7 +2554,7 @@ public class AssignmentManager {
 
     final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1));
     final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2));
-    Set<String> encodedNames = new HashSet<String>(2);
+    Set<String> encodedNames = new HashSet<>(2);
     encodedNames.add(a.getEncodedName());
     encodedNames.add(b.getEncodedName());
     Map<String, Lock> locks = locker.acquireLocks(encodedNames);
@@ -2645,7 +2641,7 @@ public class AssignmentManager {
     threadPoolExecutorService.submit(mergeReplicasCallable);
 
     // wait for assignment completion
-    ArrayList<HRegionInfo> regionAssignSet = new ArrayList<HRegionInfo>(1);
+    ArrayList<HRegionInfo> regionAssignSet = new ArrayList<>(1);
     regionAssignSet.add(mergedRegion);
     while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), Long.MAX_VALUE)) {
       LOG.debug("The merged region " + mergedRegion + " is still in transition. ");
@@ -2754,7 +2750,7 @@ public class AssignmentManager {
       final HRegionInfo hri_b) {
     // Close replicas for the original unmerged regions. create/assign new replicas
     // for the merged parent.
-    List<HRegionInfo> unmergedRegions = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> unmergedRegions = new ArrayList<>();
     unmergedRegions.add(hri_a);
     unmergedRegions.add(hri_b);
     Map<ServerName, List<HRegionInfo>> map = regionStates.getRegionAssignments(unmergedRegions);
@@ -2768,7 +2764,7 @@ public class AssignmentManager {
       }
     }
     int numReplicas = getNumReplicas(server, mergedHri.getTable());
-    List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regions = new ArrayList<>();
     for (int i = 1; i < numReplicas; i++) {
       regions.add(RegionReplicaUtil.getRegionInfoForReplica(mergedHri, i));
     }
@@ -2790,7 +2786,7 @@ public class AssignmentManager {
     // the replica1s of daughters will be on the same machine
     int numReplicas = getNumReplicas(server, parentHri.getTable());
     // unassign the old replicas
-    List<HRegionInfo> parentRegion = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> parentRegion = new ArrayList<>();
     parentRegion.add(parentHri);
     Map<ServerName, List<HRegionInfo>> currentAssign =
         regionStates.getRegionAssignments(parentRegion);
@@ -2804,7 +2800,7 @@ public class AssignmentManager {
       }
     }
     // assign daughter replicas
-    Map<HRegionInfo, ServerName> map = new HashMap<HRegionInfo, ServerName>();
+    Map<HRegionInfo, ServerName> map = new HashMap<>();
     for (int i = 1; i < numReplicas; i++) {
       prepareDaughterReplicaForAssignment(hri_a, parentHri, i, map);
       prepareDaughterReplicaForAssignment(hri_b, parentHri, i, map);
@@ -2856,7 +2852,7 @@ public class AssignmentManager {
     sendRegionClosedNotification(regionInfo);
     // also note that all the replicas of the primary should be closed
     if (state != null && state.equals(State.SPLIT)) {
-      Collection<HRegionInfo> c = new ArrayList<HRegionInfo>(1);
+      Collection<HRegionInfo> c = new ArrayList<>(1);
       c.add(regionInfo);
       Map<ServerName, List<HRegionInfo>> map = regionStates.getRegionAssignments(c);
       Collection<List<HRegionInfo>> allReplicas = map.values();
@@ -2865,7 +2861,7 @@ public class AssignmentManager {
       }
     }
     else if (state != null && state.equals(State.MERGED)) {
-      Collection<HRegionInfo> c = new ArrayList<HRegionInfo>(1);
+      Collection<HRegionInfo> c = new ArrayList<>(1);
       c.add(regionInfo);
       Map<ServerName, List<HRegionInfo>> map = regionStates.getRegionAssignments(c);
       Collection<List<HRegionInfo>> allReplicas = map.values();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
index d290f26..e1922af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
@@ -51,16 +51,13 @@ public class AssignmentVerificationReport {
   private int totalRegions = 0;
   private int totalRegionServers = 0;
   // for unassigned regions
-  private List<HRegionInfo> unAssignedRegionsList =
-    new ArrayList<HRegionInfo>();
+  private List<HRegionInfo> unAssignedRegionsList = new ArrayList<>();
 
   // For regions without valid favored nodes
-  private List<HRegionInfo> regionsWithoutValidFavoredNodes =
-    new ArrayList<HRegionInfo>();
+  private List<HRegionInfo> regionsWithoutValidFavoredNodes = new ArrayList<>();
 
   // For regions not running on the favored nodes
-  private List<HRegionInfo> nonFavoredAssignedRegionList =
-    new ArrayList<HRegionInfo>();
+  private List<HRegionInfo> nonFavoredAssignedRegionList = new ArrayList<>();
 
   // For regions running on the favored nodes
   private int totalFavoredAssignments = 0;
@@ -73,26 +70,20 @@ public class AssignmentVerificationReport {
   private float avgRegionsOnRS = 0;
   private int maxRegionsOnRS = 0;
   private int minRegionsOnRS = Integer.MAX_VALUE;
-  private Set<ServerName> mostLoadedRSSet =
-    new HashSet<ServerName>();
-  private Set<ServerName> leastLoadedRSSet =
-    new HashSet<ServerName>();
+  private Set<ServerName> mostLoadedRSSet = new HashSet<>();
+  private Set<ServerName> leastLoadedRSSet = new HashSet<>();
 
   private float avgDispersionScore = 0;
   private float maxDispersionScore = 0;
-  private Set<ServerName> maxDispersionScoreServerSet =
-    new HashSet<ServerName>();
+  private Set<ServerName> maxDispersionScoreServerSet = new HashSet<>();
   private float minDispersionScore = Float.MAX_VALUE;
-  private Set<ServerName> minDispersionScoreServerSet =
-    new HashSet<ServerName>();
+  private Set<ServerName> minDispersionScoreServerSet = new HashSet<>();
 
   private float avgDispersionNum = 0;
   private float maxDispersionNum = 0;
-  private Set<ServerName> maxDispersionNumServerSet =
-    new HashSet<ServerName>();
+  private Set<ServerName> maxDispersionNumServerSet = new HashSet<>();
   private float minDispersionNum = Float.MAX_VALUE;
-  private Set<ServerName> minDispersionNumServerSet =
-    new HashSet<ServerName>();
+  private Set<ServerName> minDispersionNumServerSet = new HashSet<>();
 
   public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
       Map<String, Map<String, Float>> regionLocalityMap) {
@@ -111,13 +102,10 @@ public class AssignmentVerificationReport {
     Map<HRegionInfo, ServerName> currentAssignment =
       snapshot.getRegionToRegionServerMap();
     // Initialize the server to its hosing region counter map
-    Map<ServerName, Integer> serverToHostingRegionCounterMap =
-      new HashMap<ServerName, Integer>();
+    Map<ServerName, Integer> serverToHostingRegionCounterMap = new HashMap<>();
 
-    Map<ServerName, Integer> primaryRSToRegionCounterMap =
-      new HashMap<ServerName, Integer>();
-    Map<ServerName, Set<ServerName>> primaryToSecTerRSMap =
-      new HashMap<ServerName, Set<ServerName>>();
+    Map<ServerName, Integer> primaryRSToRegionCounterMap = new HashMap<>();
+    Map<ServerName, Set<ServerName>> primaryToSecTerRSMap = new HashMap<>();
 
     // Check the favored nodes and its locality information
     // Also keep tracker of the most loaded and least loaded region servers
@@ -164,7 +152,7 @@ public class AssignmentVerificationReport {
         // Update the primary rs to secondary and tertiary rs map
         Set<ServerName> secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
         if (secAndTerSet == null) {
-          secAndTerSet = new HashSet<ServerName>();
+          secAndTerSet = new HashSet<>();
         }
         secAndTerSet.add(secondaryRS);
         secAndTerSet.add(tertiaryRS);
@@ -340,10 +328,8 @@ public class AssignmentVerificationReport {
       plan = newPlan;
     }
     // Get the region to region server mapping
-    Map<ServerName, Integer> primaryRSToRegionCounterMap =
-        new HashMap<ServerName, Integer>();
-    Map<ServerName, Set<ServerName>> primaryToSecTerRSMap =
-        new HashMap<ServerName, Set<ServerName>>();
+    Map<ServerName, Integer> primaryRSToRegionCounterMap = new HashMap<>();
+    Map<ServerName, Set<ServerName>> primaryToSecTerRSMap = new HashMap<>();
 
     // Check the favored nodes and its locality information
     // Also keep tracker of the most loaded and least loaded region servers
@@ -375,7 +361,7 @@ public class AssignmentVerificationReport {
         // Update the primary rs to secondary and tertiary rs map
         Set<ServerName> secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
         if (secAndTerSet == null) {
-          secAndTerSet = new HashSet<ServerName>();
+          secAndTerSet = new HashSet<>();
         }
         secAndTerSet.add(secondaryRS);
         secAndTerSet.add(tertiaryRS);
@@ -451,7 +437,7 @@ public class AssignmentVerificationReport {
    *
    */
   public List<Float> getDispersionInformation() {
-    List<Float> dispersion = new ArrayList<Float>();
+    List<Float> dispersion = new ArrayList<>();
     dispersion.add(avgDispersionScore);
     dispersion.add(maxDispersionScore);
     dispersion.add(minDispersionScore);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
index 606dce4..d8c511e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
@@ -59,7 +59,7 @@ public class BulkReOpen extends BulkAssigner {
         .entrySet()) {
       final List<HRegionInfo> hris = e.getValue();
       // add plans for the regions that need to be reopened
-      Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>();
+      Map<String, RegionPlan> plans = new HashMap<>();
       for (HRegionInfo hri : hris) {
         RegionPlan reOpenPlan = assignmentManager.getRegionReopenPlan(hri);
         plans.put(hri.getEncodedName(), reOpenPlan);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index ef042af..affd44c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -151,9 +151,8 @@ public class CatalogJanitor extends ScheduledChore {
     final AtomicInteger count = new AtomicInteger(0);
     // Keep Map of found split parents.  There are candidates for cleanup.
     // Use a comparator that has split parents come before its daughters.
-    final Map<HRegionInfo, Result> splitParents =
-      new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
-    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
+    final Map<HRegionInfo, Result> splitParents = new TreeMap<>(new SplitParentFirstComparator());
+    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<>();
     // This visitor collects split parents and counts rows in the hbase:meta table
 
     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
@@ -181,8 +180,7 @@ public class CatalogJanitor extends ScheduledChore {
     // the start row
     MetaTableAccessor.scanMetaForTableRegions(this.connection, visitor, tableName);
 
-    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
-        count.get(), mergedRegions, splitParents);
+    return new Triple<>(count.get(), mergedRegions, splitParents);
   }
 
   /**
@@ -275,7 +273,7 @@ public class CatalogJanitor extends ScheduledChore {
       // Now work on our list of found parents. See if any we can clean up.
       int splitCleaned = 0;
       // regions whose parents are still around
-      HashSet<String> parentNotCleaned = new HashSet<String>();
+      HashSet<String> parentNotCleaned = new HashSet<>();
       for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
         if (this.services.isInMaintenanceMode()) {
           // Stop cleaning if the master is in maintenance mode
@@ -398,7 +396,7 @@ public class CatalogJanitor extends ScheduledChore {
   Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
   throws IOException {
     if (daughter == null)  {
-      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
+      return new Pair<>(Boolean.FALSE, Boolean.FALSE);
     }
 
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
@@ -411,12 +409,12 @@ public class CatalogJanitor extends ScheduledChore {
 
     try {
       if (!FSUtils.isExists(fs, daughterRegionDir)) {
-        return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
+        return new Pair<>(Boolean.FALSE, Boolean.FALSE);
       }
     } catch (IOException ioe) {
       LOG.error("Error trying to determine if daughter region exists, " +
                "assuming exists and has references", ioe);
-      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
+      return new Pair<>(Boolean.TRUE, Boolean.TRUE);
     }
 
     boolean references = false;
@@ -433,9 +431,9 @@ public class CatalogJanitor extends ScheduledChore {
     } catch (IOException e) {
       LOG.error("Error trying to determine referenced files from : " + daughter.getEncodedName()
           + ", to: " + parent.getEncodedName() + " assuming has references", e);
-      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
+      return new Pair<>(Boolean.TRUE, Boolean.TRUE);
     }
-    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
+    return new Pair<>(Boolean.TRUE, Boolean.valueOf(references));
   }
 
   private HTableDescriptor getTableDescriptor(final TableName tableName)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
index 52af89e..bf3ae7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
@@ -122,7 +122,7 @@ class ClusterSchemaServiceImpl implements ClusterSchemaService {
     checkIsRunning();
     Set<NamespaceDescriptor> set = getTableNamespaceManager().list();
     if (set == null || set.isEmpty()) return EMPTY_NAMESPACE_LIST;
-    List<NamespaceDescriptor> list = new ArrayList<NamespaceDescriptor>(set.size());
+    List<NamespaceDescriptor> list = new ArrayList<>(set.size());
     list.addAll(set);
     return Collections.unmodifiableList(list);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 3b19ada..ea5516d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -97,8 +97,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
   private long lastMessageTime = 0;
   private final HMaster master;
   private final int messagePeriod; // time between two message
-  private final ConcurrentMap<ServerName, Integer> lastSent =
-      new ConcurrentHashMap<ServerName, Integer>();
+  private final ConcurrentMap<ServerName, Integer> lastSent = new ConcurrentHashMap<>();
   private Publisher publisher;
   private boolean connected = false;
 
@@ -194,7 +193,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
     }
 
     // We're sending the new deads first.
-    List<Map.Entry<ServerName, Integer>> entries = new ArrayList<Map.Entry<ServerName, Integer>>();
+    List<Map.Entry<ServerName, Integer>> entries = new ArrayList<>();
     entries.addAll(lastSent.entrySet());
     Collections.sort(entries, new Comparator<Map.Entry<ServerName, Integer>>() {
       @Override
@@ -205,7 +204,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
 
     // With a limit of MAX_SERVER_PER_MESSAGE
     int max = entries.size() > MAX_SERVER_PER_MESSAGE ? MAX_SERVER_PER_MESSAGE : entries.size();
-    List<ServerName> res = new ArrayList<ServerName>(max);
+    List<ServerName> res = new ArrayList<>(max);
 
     for (int i = 0; i < max; i++) {
       Map.Entry<ServerName, Integer> toSend = entries.get(i);
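
Note that the anonymous Comparator in the Collections.sort call above keeps its explicit type arguments. That is not an oversight: combining the diamond operator with an anonymous inner class is only legal from Java 9 onward (JEP 213), so on the JDK 7/8 toolchains HBase built with at the time, a diamond there would not compile. A reduced example of the distinction:

    import java.util.Comparator;

    // Legal on Java 7+: explicit type arguments on an anonymous class.
    Comparator<Integer> ascending = new Comparator<Integer>() {
      @Override
      public int compare(Integer a, Integer b) {
        return a.compareTo(b);
      }
    };
    // new Comparator<>() { ... } is rejected before Java 9.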

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index c33cdcc..faceba2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -51,7 +51,7 @@ public class DeadServer {
    * and it's server logs are recovered, it will be told to call server startup
    * because by then, its regions have probably been reassigned.
    */
-  private final Map<ServerName, Long> deadServers = new HashMap<ServerName, Long>();
+  private final Map<ServerName, Long> deadServers = new HashMap<>();
 
   /**
    * Number of dead servers currently being processed
@@ -102,7 +102,7 @@ public class DeadServer {
   public synchronized boolean areDeadServersInProgress() { return processing; }
 
   public synchronized Set<ServerName> copyServerNames() {
-    Set<ServerName> clone = new HashSet<ServerName>(deadServers.size());
+    Set<ServerName> clone = new HashSet<>(deadServers.size());
     clone.addAll(deadServers.keySet());
     return clone;
   }
@@ -177,11 +177,11 @@ public class DeadServer {
    * @return a sorted array list, by death time, lowest values first.
    */
   public synchronized List<Pair<ServerName, Long>> copyDeadServersSince(long ts){
-    List<Pair<ServerName, Long>> res =  new ArrayList<Pair<ServerName, Long>>(size());
+    List<Pair<ServerName, Long>> res =  new ArrayList<>(size());
 
     for (Map.Entry<ServerName, Long> entry:deadServers.entrySet()){
       if (entry.getValue() >= ts){
-        res.add(new Pair<ServerName, Long>(entry.getKey(), entry.getValue()));
+        res.add(new Pair<>(entry.getKey(), entry.getValue()));
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
index 43ea523..fc3607f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
@@ -43,8 +43,7 @@ import org.apache.hadoop.hbase.ServerName;
 public class GeneralBulkAssigner extends BulkAssigner {
   private static final Log LOG = LogFactory.getLog(GeneralBulkAssigner.class);
 
-  private Map<ServerName, List<HRegionInfo>> failedPlans
-    = new ConcurrentHashMap<ServerName, List<HRegionInfo>>();
+  private Map<ServerName, List<HRegionInfo>> failedPlans = new ConcurrentHashMap<>();
   private ExecutorService pool;
 
   final Map<ServerName, List<HRegionInfo>> bulkPlan;
@@ -82,7 +81,7 @@ public class GeneralBulkAssigner extends BulkAssigner {
   @Override
   protected boolean waitUntilDone(final long timeout)
   throws InterruptedException {
-    Set<HRegionInfo> regionSet = new HashSet<HRegionInfo>();
+    Set<HRegionInfo> regionSet = new HashSet<>();
     for (List<HRegionInfo> regionList : bulkPlan.values()) {
       regionSet.addAll(regionList);
     }
@@ -164,7 +163,7 @@ public class GeneralBulkAssigner extends BulkAssigner {
   }
 
   private int reassignFailedPlans() {
-    List<HRegionInfo> reassigningRegions = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> reassigningRegions = new ArrayList<>();
     for (Map.Entry<ServerName, List<HRegionInfo>> e : failedPlans.entrySet()) {
       LOG.info("Failed assigning " + e.getValue().size()
           + " regions to server " + e.getKey() + ", reassigning them");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 78f1783..501d3bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1063,7 +1063,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
    //start the hfile archive cleaner thread
     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
-    Map<String, Object> params = new HashMap<String, Object>();
+    Map<String, Object> params = new HashMap<>();
     params.put(MASTER, this);
     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
         .getFileSystem(), archiveDir, params);
@@ -1327,7 +1327,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
         this.assignmentManager.getRegionStates().getAssignmentsByTable();
 
-      List<RegionPlan> plans = new ArrayList<RegionPlan>();
+      List<RegionPlan> plans = new ArrayList<>();
 
       //Give the balancer the current cluster state.
       this.balancer.setClusterStatus(getClusterStatus());
@@ -2235,8 +2235,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   Pair<HRegionInfo, ServerName> getTableRegionForRow(
       final TableName tableName, final byte [] rowKey)
   throws IOException {
-    final AtomicReference<Pair<HRegionInfo, ServerName>> result =
-      new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
+    final AtomicReference<Pair<HRegionInfo, ServerName>> result = new AtomicReference<>(null);
 
     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
         @Override
@@ -2345,7 +2344,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     List<ServerName> backupMasters = null;
     if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
-      backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
+      backupMasters = new ArrayList<>(backupMasterStrings.size());
       for (String s: backupMasterStrings) {
         try {
           byte [] bytes;
@@ -2852,7 +2851,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   List<NamespaceDescriptor> getNamespaces() throws IOException {
     checkInitialized();
-    final List<NamespaceDescriptor> nsds = new ArrayList<NamespaceDescriptor>();
+    final List<NamespaceDescriptor> nsds = new ArrayList<>();
     boolean bypass = false;
     if (cpHost != null) {
       bypass = cpHost.preListNamespaceDescriptors(nsds);
@@ -2918,7 +2917,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
       final List<TableName> tableNameList, final boolean includeSysTables)
   throws IOException {
-    List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
+    List<HTableDescriptor> htds = new ArrayList<>();
     boolean bypass = cpHost != null?
         cpHost.preGetTableDescriptors(tableNameList, htds, regex): false;
     if (!bypass) {
@@ -2939,13 +2938,13 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   public List<TableName> listTableNames(final String namespace, final String regex,
       final boolean includeSysTables) throws IOException {
-    List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
+    List<HTableDescriptor> htds = new ArrayList<>();
     boolean bypass = cpHost != null? cpHost.preGetTableNames(htds, regex): false;
     if (!bypass) {
       htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
       if (cpHost != null) cpHost.postGetTableNames(htds, regex);
     }
-    List<TableName> result = new ArrayList<TableName>(htds.size());
+    List<TableName> result = new ArrayList<>(htds.size());
     for (HTableDescriptor htd: htds) result.add(htd.getTableName());
     return result;
   }
@@ -3262,7 +3261,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public List<ServerName> listDrainingRegionServers() {
     String parentZnode = getZooKeeper().znodePaths.drainingZNode;
-    List<ServerName> serverNames = new ArrayList<ServerName>();
+    List<ServerName> serverNames = new ArrayList<>();
     List<String> serverStrs = null;
     try {
       serverStrs = ZKUtil.listChildrenNoWatch(getZooKeeper(), parentZnode);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
index 5e1917b..1988e2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
@@ -100,7 +100,7 @@ public class MasterMetaBootstrap {
       throws IOException, InterruptedException, KeeperException {
     int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
            HConstants.DEFAULT_META_REPLICA_NUM);
-    final Set<ServerName> EMPTY_SET = new HashSet<ServerName>();
+    final Set<ServerName> EMPTY_SET = new HashSet<>();
     for (int i = 1; i < numReplicas; i++) {
       assignMeta(EMPTY_SET, i);
     }
@@ -241,7 +241,7 @@ public class MasterMetaBootstrap {
    */
   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
     final ZooKeeperWatcher zooKeeper = master.getZooKeeper();
-    Set<ServerName> result = new HashSet<ServerName>();
+    Set<ServerName> result = new HashSet<>();
     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.znodePaths.recoveringRegionsZNode,
       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index fc0ecfb..2b1232a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -56,7 +56,7 @@ public class MasterMobCompactionThread {
     final String n = Thread.currentThread().getName();
     // this pool is used to run the mob compaction
     this.masterMobPool = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS,
-      new SynchronousQueue<Runnable>(), new ThreadFactory() {
+      new SynchronousQueue<>(), new ThreadFactory() {
         @Override
         public Thread newThread(Runnable r) {
           String name = n + "-MasterMobCompaction-" + EnvironmentEdgeManager.currentTime();
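
In this hunk the diamond sits in a constructor-argument position: new SynchronousQueue<>() is typed against the BlockingQueue<Runnable> parameter of ThreadPoolExecutor. This depends on the target typing added in Java 8; under Java 7's rules a diamond in an argument position is inferred without the enclosing call's context (yielding SynchronousQueue<Object> here), so this particular replacement presumably relies on the project's JDK 8 baseline. A reduced sketch of the same shape:

    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // <Runnable> is inferred from ThreadPoolExecutor's
    // BlockingQueue<Runnable> workQueue parameter (Java 8 and later).
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 2, 60L, TimeUnit.SECONDS, new SynchronousQueue<>());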

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 3beda05..177ee32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -234,7 +234,7 @@ public class MasterRpcServices extends RSRpcServices
    * @return list of blocking services and their security info classes that this server supports
    */
   protected List<BlockingServiceAndInterface> getServices() {
-    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(5);
+    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
     bssi.add(new BlockingServiceAndInterface(
       MasterService.newReflectiveBlockingService(this),
       MasterService.BlockingInterface.class));
@@ -1333,7 +1333,7 @@ public class MasterRpcServices extends RSRpcServices
       Pair<HRegionInfo, ServerName> pair =
         MetaTableAccessor.getRegion(master.getConnection(), regionName);
       if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) {
-        pair = new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
+        pair = new Pair<>(HRegionInfo.FIRST_META_REGIONINFO,
             master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
       }
       if (pair == null) {
@@ -1491,7 +1491,7 @@ public class MasterRpcServices extends RSRpcServices
       throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
     }
     boolean allFiles = false;
-    List<HColumnDescriptor> compactedColumns = new ArrayList<HColumnDescriptor>();
+    List<HColumnDescriptor> compactedColumns = new ArrayList<>();
     HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
     byte[] family = null;
     if (request.hasFamily()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index 1f9729c..27aca94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -155,7 +155,7 @@ public class MasterWalManager {
     boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
         WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
 
-    Set<ServerName> serverNames = new HashSet<ServerName>();
+    Set<ServerName> serverNames = new HashSet<>();
     Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME);
 
     do {
@@ -218,7 +218,7 @@ public class MasterWalManager {
   }
 
   public void splitLog(final ServerName serverName) throws IOException {
-    Set<ServerName> serverNames = new HashSet<ServerName>();
+    Set<ServerName> serverNames = new HashSet<>();
     serverNames.add(serverName);
     splitLog(serverNames);
   }
@@ -228,7 +228,7 @@ public class MasterWalManager {
    * @param serverName logs belonging to this server will be split
    */
   public void splitMetaLog(final ServerName serverName) throws IOException {
-    Set<ServerName> serverNames = new HashSet<ServerName>();
+    Set<ServerName> serverNames = new HashSet<>();
     serverNames.add(serverName);
     splitMetaLog(serverNames);
   }
@@ -245,7 +245,7 @@ public class MasterWalManager {
       "We only release this lock when we set it. Updates to code that uses it should verify use " +
       "of the guard boolean.")
   private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
-    List<Path> logDirs = new ArrayList<Path>();
+    List<Path> logDirs = new ArrayList<>();
     boolean needReleaseLock = false;
     if (!this.services.isInitialized()) {
       // during master initialization, we could have multiple places splitting a same wal

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
index 2b1fb50..5c06857 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
@@ -80,7 +80,7 @@ public class RackManager {
   public List<String> getRack(List<ServerName> servers) {
     // just a note - switchMapping caches results (at least the implementation should unless the
     // resolution is really a lightweight process)
-    List<String> serversAsString = new ArrayList<String>(servers.size());
+    List<String> serversAsString = new ArrayList<>(servers.size());
     for (ServerName server : servers) {
       serversAsString.add(server.getHostname());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 7acf9df..ffc3e5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -106,7 +106,7 @@ public class RegionPlacementMaintainer {
     this.conf = conf;
     this.enforceLocality = enforceLocality;
     this.enforceMinAssignmentMove = enforceMinAssignmentMove;
-    this.targetTableSet = new HashSet<TableName>();
+    this.targetTableSet = new HashSet<>();
     this.rackManager = new RackManager(conf);
     try {
       this.connection = ConnectionFactory.createConnection(this.conf);
@@ -163,7 +163,7 @@ public class RegionPlacementMaintainer {
     if (this.enforceLocality == true) {
       regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
     }
-    List<AssignmentVerificationReport> reports = new ArrayList<AssignmentVerificationReport>();
+    List<AssignmentVerificationReport> reports = new ArrayList<>();
     // Iterate all the tables to fill up the verification report
     for (TableName table : tables) {
       if (!this.targetTableSet.isEmpty() &&
@@ -204,7 +204,7 @@ public class RegionPlacementMaintainer {
         assignmentSnapshot.getRegionToRegionServerMap();
 
       // Get the all the region servers
-      List<ServerName> servers = new ArrayList<ServerName>();
+      List<ServerName> servers = new ArrayList<>();
       try (Admin admin = this.connection.getAdmin()) {
         servers.addAll(admin.getClusterStatus().getServers());
       }
@@ -255,15 +255,14 @@ public class RegionPlacementMaintainer {
         // Compute the total rack locality for each region in each rack. The total
         // rack locality is the sum of the localities of a region on all servers in
         // a rack.
-        Map<String, Map<HRegionInfo, Float>> rackRegionLocality =
-            new HashMap<String, Map<HRegionInfo, Float>>();
+        Map<String, Map<HRegionInfo, Float>> rackRegionLocality = new HashMap<>();
         for (int i = 0; i < numRegions; i++) {
           HRegionInfo region = regions.get(i);
           for (int j = 0; j < regionSlots; j += slotsPerServer) {
             String rack = rackManager.getRack(servers.get(j / slotsPerServer));
             Map<HRegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
             if (rackLocality == null) {
-              rackLocality = new HashMap<HRegionInfo, Float>();
+              rackLocality = new HashMap<>();
               rackRegionLocality.put(rack, rackLocality);
             }
             Float localityObj = rackLocality.get(region);
@@ -395,8 +394,7 @@ public class RegionPlacementMaintainer {
         tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);
 
         for (int i = 0; i < numRegions; i++) {
-          List<ServerName> favoredServers =
-            new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
+          List<ServerName> favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
           ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
           favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
               ServerName.NON_STARTCODE));
@@ -417,7 +415,7 @@ public class RegionPlacementMaintainer {
         LOG.info("Assignment plan for secondary and tertiary generated " +
             "using MunkresAssignment");
       } else {
-        Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
+        Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<>();
         for (int i = 0; i < numRegions; i++) {
           primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
         }
@@ -427,8 +425,7 @@ public class RegionPlacementMaintainer {
         Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
             favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
         for (int i = 0; i < numRegions; i++) {
-          List<ServerName> favoredServers =
-            new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
+          List<ServerName> favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
           HRegionInfo currentRegion = regions.get(i);
           ServerName s = primaryRSMap.get(currentRegion);
           favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
@@ -614,8 +611,7 @@ public class RegionPlacementMaintainer {
     if (plan == null) return;
     LOG.info("========== Start to print the assignment plan ================");
     // sort the map based on region info
-    Map<String, List<ServerName>> assignmentMap =
-      new TreeMap<String, List<ServerName>>(plan.getAssignmentMap());
+    Map<String, List<ServerName>> assignmentMap = new TreeMap<>(plan.getAssignmentMap());
 
     for (Map.Entry<String, List<ServerName>> entry : assignmentMap.entrySet()) {
 
@@ -666,13 +662,11 @@ public class RegionPlacementMaintainer {
 
     // Keep track of the failed and succeeded updates
     int succeededNum = 0;
-    Map<ServerName, Exception> failedUpdateMap =
-      new HashMap<ServerName, Exception>();
+    Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
 
     for (Map.Entry<ServerName, List<HRegionInfo>> entry :
       currentAssignment.entrySet()) {
-      List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos =
-          new ArrayList<Pair<HRegionInfo, List<ServerName>>>();
+      List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
       try {
         // Keep track of the favored updates for the current region server
         FavoredNodesPlan singleServerPlan = null;
@@ -687,8 +681,7 @@ public class RegionPlacementMaintainer {
             }
             // Update the single-server plan
             singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
-            regionUpdateInfos.add(
-              new Pair<HRegionInfo, List<ServerName>>(region, favoredServerList));
+            regionUpdateInfos.add(new Pair<>(region, favoredServerList));
           }
         }
         if (singleServerPlan != null) {
@@ -749,7 +742,7 @@ public class RegionPlacementMaintainer {
    */
   public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan)
       throws IOException {
-    Map<TableName, Integer> movesPerTable = new HashMap<TableName, Integer>();
+    Map<TableName, Integer> movesPerTable = new HashMap<>();
     SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
     Map<TableName, List<HRegionInfo>> tableToRegions = snapshot
         .getTableToRegionMap();
@@ -944,7 +937,7 @@ public class RegionPlacementMaintainer {
     if (favoredNodesArray == null)
       return null;
 
-    List<ServerName> serverList = new ArrayList<ServerName>();
+    List<ServerName> serverList = new ArrayList<>();
     for (String hostNameAndPort : favoredNodesArray) {
       serverList.add(ServerName.valueOf(hostNameAndPort, ServerName.NON_STARTCODE));
     }
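
A side note on the mechanics this patch relies on: with the diamond operator
(Java 7 and later) the compiler infers every type argument, nested ones
included, from the declaration site, so a type such as the
Map<String, Map<HRegionInfo, Float>> above only has to be spelled out once.
A minimal, self-contained sketch of the same idea (hypothetical names, not
part of this patch):

    import java.util.HashMap;
    import java.util.Map;

    public class DiamondSketch {
      public static void main(String[] args) {
        // Inferred as HashMap<String, Map<String, Float>> from the
        // declaration; before Java 7 the nested arguments were repeated
        // on the right-hand side as well.
        Map<String, Map<String, Float>> rackLocality = new HashMap<>();
        Map<String, Float> perRegion = new HashMap<>(); // likewise inferred
        perRegion.put("region-a", 0.75f);
        rackLocality.put("rack1", perRegion);
        System.out.println(rackLocality); // {rack1={region-a=0.75}}
      }
    }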

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 4125eea..a1e24f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -80,41 +80,35 @@ public class RegionStates {
   /**
    * Regions currently in transition.
    */
-  final HashMap<String, RegionState> regionsInTransition =
-    new HashMap<String, RegionState>();
+  final HashMap<String, RegionState> regionsInTransition = new HashMap<>();
 
   /**
    * Region encoded name to state map.
    * All the regions should be in this map.
    */
-  private final Map<String, RegionState> regionStates =
-    new HashMap<String, RegionState>();
+  private final Map<String, RegionState> regionStates = new HashMap<>();
 
   /**
    * Holds mapping of table -> region state
    */
-  private final Map<TableName, Map<String, RegionState>> regionStatesTableIndex =
-      new HashMap<TableName, Map<String, RegionState>>();
+  private final Map<TableName, Map<String, RegionState>> regionStatesTableIndex = new HashMap<>();
 
   /**
    * Server to regions assignment map.
    * Contains the set of regions currently assigned to a given server.
    */
-  private final Map<ServerName, Set<HRegionInfo>> serverHoldings =
-    new HashMap<ServerName, Set<HRegionInfo>>();
+  private final Map<ServerName, Set<HRegionInfo>> serverHoldings = new HashMap<>();
 
   /**
    * Maintains the mapping from the default region to the replica regions.
    */
-  private final Map<HRegionInfo, Set<HRegionInfo>> defaultReplicaToOtherReplicas =
-    new HashMap<HRegionInfo, Set<HRegionInfo>>();
+  private final Map<HRegionInfo, Set<HRegionInfo>> defaultReplicaToOtherReplicas = new HashMap<>();
 
   /**
    * Region to server assignment map.
    * Contains the server a given region is currently assigned to.
    */
-  private final TreeMap<HRegionInfo, ServerName> regionAssignments =
-    new TreeMap<HRegionInfo, ServerName>();
+  private final TreeMap<HRegionInfo, ServerName> regionAssignments = new TreeMap<>();
 
   /**
    * Encoded region name to server assignment map for re-assignment
@@ -126,8 +120,7 @@ public class RegionStates {
    * is offline while the info in lastAssignments is cleared when
    * the region is closed or the server is dead and processed.
    */
-  private final HashMap<String, ServerName> lastAssignments =
-    new HashMap<String, ServerName>();
+  private final HashMap<String, ServerName> lastAssignments = new HashMap<>();
 
   /**
    * Encoded region name to server assignment map for the
@@ -138,16 +131,14 @@ public class RegionStates {
    * to match the meta. We need this map to find out the old server
    * whose serverHoldings needs cleanup, given a moved region.
    */
-  private final HashMap<String, ServerName> oldAssignments =
-    new HashMap<String, ServerName>();
+  private final HashMap<String, ServerName> oldAssignments = new HashMap<>();
 
   /**
    * Map a host:port pair string to the latest start code
    * of a region server which is known to be dead. It is dead
    * to us, but the server manager may not know it yet.
    */
-  private final HashMap<String, Long> deadServers =
-    new HashMap<String, Long>();
+  private final HashMap<String, Long> deadServers = new HashMap<>();
 
   /**
    * Map a dead server to the time when its log split is done.
@@ -156,8 +147,7 @@ public class RegionStates {
    * on a configured time. By default, we assume a dead
    * server should be done with log splitting in two hours.
    */
-  private final HashMap<ServerName, Long> processedServers =
-    new HashMap<ServerName, Long>();
+  private final HashMap<ServerName, Long> processedServers = new HashMap<>();
   private long lastProcessedServerCleanTime;
 
   private final TableStateManager tableStateManager;
@@ -181,7 +171,7 @@ public class RegionStates {
    * @return a copy of the region assignment map
    */
   public synchronized Map<HRegionInfo, ServerName> getRegionAssignments() {
-    return new TreeMap<HRegionInfo, ServerName>(regionAssignments);
+    return new TreeMap<>(regionAssignments);
   }
 
   /**
@@ -191,7 +181,7 @@ public class RegionStates {
    */
   synchronized Map<ServerName, List<HRegionInfo>> getRegionAssignments(
     Collection<HRegionInfo> regions) {
-    Map<ServerName, List<HRegionInfo>> map = new HashMap<ServerName, List<HRegionInfo>>();
+    Map<ServerName, List<HRegionInfo>> map = new HashMap<>();
     for (HRegionInfo region : regions) {
       HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(region);
       Set<HRegionInfo> allReplicas = defaultReplicaToOtherReplicas.get(defaultReplica);
@@ -201,7 +191,7 @@ public class RegionStates {
           if (server != null) {
             List<HRegionInfo> regionsOnServer = map.get(server);
             if (regionsOnServer == null) {
-              regionsOnServer = new ArrayList<HRegionInfo>(1);
+              regionsOnServer = new ArrayList<>(1);
               map.put(server, regionsOnServer);
             }
             regionsOnServer.add(hri);
@@ -220,11 +210,11 @@ public class RegionStates {
    * Get regions in transition and their states
    */
   public synchronized Set<RegionState> getRegionsInTransition() {
-    return new HashSet<RegionState>(regionsInTransition.values());
+    return new HashSet<>(regionsInTransition.values());
   }
 
   public synchronized SortedSet<RegionState> getRegionsInTransitionOrderedByTimestamp() {
-    final TreeSet<RegionState> rit = new TreeSet<RegionState>(REGION_STATE_COMPARATOR);
+    final TreeSet<RegionState> rit = new TreeSet<>(REGION_STATE_COMPARATOR);
     for (RegionState rs: regionsInTransition.values()) {
       rit.add(rs);
     }
@@ -404,7 +394,7 @@ public class RegionStates {
     RegionState oldState = regionStates.put(encodedName, regionState);
     Map<String, RegionState> map = regionStatesTableIndex.get(table);
     if (map == null) {
-      map = new HashMap<String, RegionState>();
+      map = new HashMap<>();
       regionStatesTableIndex.put(table, map);
     }
     map.put(encodedName, regionState);
@@ -483,7 +473,7 @@ public class RegionStates {
   private void addToServerHoldings(ServerName serverName, HRegionInfo hri) {
     Set<HRegionInfo> regions = serverHoldings.get(serverName);
     if (regions == null) {
-      regions = new HashSet<HRegionInfo>();
+      regions = new HashSet<>();
       serverHoldings.put(serverName, regions);
     }
     regions.add(hri);
@@ -494,7 +484,7 @@ public class RegionStates {
     Set<HRegionInfo> replicas =
         defaultReplicaToOtherReplicas.get(defaultReplica);
     if (replicas == null) {
-      replicas = new HashSet<HRegionInfo>();
+      replicas = new HashSet<>();
       defaultReplicaToOtherReplicas.put(defaultReplica, replicas);
     }
     replicas.add(hri);
@@ -618,16 +608,16 @@ public class RegionStates {
    */
   public List<HRegionInfo> serverOffline(final ServerName sn) {
     // Offline all regions on this server not already in transition.
-    List<HRegionInfo> rits = new ArrayList<HRegionInfo>();
-    Set<HRegionInfo> regionsToCleanIfNoMetaEntry = new HashSet<HRegionInfo>();
+    List<HRegionInfo> rits = new ArrayList<>();
+    Set<HRegionInfo> regionsToCleanIfNoMetaEntry = new HashSet<>();
     // Offline regions outside the loop and synchronized block to avoid
     // ConcurrentModificationException and deadlock in case meta is unassigned
     // but its RegionState is blocked.
-    Set<HRegionInfo> regionsToOffline = new HashSet<HRegionInfo>();
+    Set<HRegionInfo> regionsToOffline = new HashSet<>();
     synchronized (this) {
       Set<HRegionInfo> assignedRegions = serverHoldings.get(sn);
       if (assignedRegions == null) {
-        assignedRegions = new HashSet<HRegionInfo>();
+        assignedRegions = new HashSet<>();
       }
 
       for (HRegionInfo region : assignedRegions) {
@@ -711,7 +701,7 @@ public class RegionStates {
    * @return Online regions from <code>tableName</code>
    */
   public synchronized List<HRegionInfo> getRegionsOfTable(TableName tableName) {
-    List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> tableRegions = new ArrayList<>();
     // The boundary needs the table's name but region ID 0 so that it sorts
     // before all of the table's regions.
     HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L);
@@ -733,10 +723,9 @@ public class RegionStates {
    */
   public synchronized Map<RegionState.State, List<HRegionInfo>>
   getRegionByStateOfTable(TableName tableName) {
-    Map<RegionState.State, List<HRegionInfo>> tableRegions =
-        new HashMap<State, List<HRegionInfo>>();
+    Map<RegionState.State, List<HRegionInfo>> tableRegions = new HashMap<>();
     for (State state : State.values()) {
-      tableRegions.put(state, new ArrayList<HRegionInfo>());
+      tableRegions.put(state, new ArrayList<>());
     }
     Map<String, RegionState> indexMap = regionStatesTableIndex.get(tableName);
     if (indexMap == null)
@@ -774,7 +763,7 @@ public class RegionStates {
    * We loop through all regions, assuming tables are not deleted too often.
    */
   public void tableDeleted(final TableName tableName) {
-    Set<HRegionInfo> regionsToDelete = new HashSet<HRegionInfo>();
+    Set<HRegionInfo> regionsToDelete = new HashSet<>();
     synchronized (this) {
       for (RegionState state: regionStates.values()) {
         HRegionInfo region = state.getRegion();
@@ -794,7 +783,7 @@ public class RegionStates {
   public synchronized Set<HRegionInfo> getServerRegions(ServerName serverName) {
     Set<HRegionInfo> regions = serverHoldings.get(serverName);
     if (regions == null) return null;
-    return new HashSet<HRegionInfo>(regions);
+    return new HashSet<>(regions);
   }
 
   /**
@@ -954,7 +943,7 @@ public class RegionStates {
    */
   synchronized Map<HRegionInfo, ServerName> closeAllUserRegions(Set<TableName> excludedTables) {
     boolean noExcludeTables = excludedTables == null || excludedTables.isEmpty();
-    Set<HRegionInfo> toBeClosed = new HashSet<HRegionInfo>(regionStates.size());
+    Set<HRegionInfo> toBeClosed = new HashSet<>(regionStates.size());
     for(RegionState state: regionStates.values()) {
       HRegionInfo hri = state.getRegion();
       if (state.isSplit() || hri.isSplit()) {
@@ -966,8 +955,7 @@ public class RegionStates {
         toBeClosed.add(hri);
       }
     }
-    Map<HRegionInfo, ServerName> allUserRegions =
-      new HashMap<HRegionInfo, ServerName>(toBeClosed.size());
+    Map<HRegionInfo, ServerName> allUserRegions = new HashMap<>(toBeClosed.size());
     for (HRegionInfo hri: toBeClosed) {
       RegionState regionState = updateRegionState(hri, State.CLOSED);
       allUserRegions.put(hri, regionState.getServerName());
@@ -1032,7 +1020,7 @@ public class RegionStates {
     for (Map<ServerName, List<HRegionInfo>> map: result.values()) {
       for (ServerName svr: onlineSvrs.keySet()) {
         if (!map.containsKey(svr)) {
-          map.put(svr, new ArrayList<HRegionInfo>());
+          map.put(svr, new ArrayList<>());
         }
       }
       map.keySet().removeAll(drainingServers);
@@ -1041,20 +1029,19 @@ public class RegionStates {
   }
 
   private Map<TableName, Map<ServerName, List<HRegionInfo>>> getTableRSRegionMap(Boolean bytable){
-    Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
-            new HashMap<TableName, Map<ServerName,List<HRegionInfo>>>();
+    Map<TableName, Map<ServerName, List<HRegionInfo>>> result = new HashMap<>();
     for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
       for (HRegionInfo hri: e.getValue()) {
         if (hri.isMetaRegion()) continue;
         TableName tablename = bytable ? hri.getTable() : TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME);
         Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
         if (svrToRegions == null) {
-          svrToRegions = new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
+          svrToRegions = new HashMap<>(serverHoldings.size());
           result.put(tablename, svrToRegions);
         }
         List<HRegionInfo> regions = svrToRegions.get(e.getKey());
         if (regions == null) {
-          regions = new ArrayList<HRegionInfo>();
+          regions = new ArrayList<>();
           svrToRegions.put(e.getKey(), regions);
         }
         regions.add(hri);
@@ -1072,10 +1059,9 @@ public class RegionStates {
    * @return a Map of ServerName to a List of HRegionInfo's
    */
   protected synchronized Map<ServerName, List<HRegionInfo>> getRegionAssignmentsByServer() {
-    Map<ServerName, List<HRegionInfo>> regionsByServer =
-        new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
+    Map<ServerName, List<HRegionInfo>> regionsByServer = new HashMap<>(serverHoldings.size());
     for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
-      regionsByServer.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
+      regionsByServer.put(e.getKey(), new ArrayList<>(e.getValue()));
     }
     return regionsByServer;
   }
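
The get/null-check/put idiom that recurs above (addToServerHoldings and the
replica bookkeeping just after it) predates this patch; on a Java 8 target it
could be collapsed further with Map.computeIfAbsent, though that is beyond the
scope of this diamond-operator cleanup. A sketch under that assumption, with
hypothetical names:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MultimapSketch {
      public static void main(String[] args) {
        Map<String, List<String>> serverHoldings = new HashMap<>();
        // One call replaces the get, null check, new ArrayList<>(), put, add
        // sequence used in the bodies above.
        serverHoldings.computeIfAbsent("server-1", k -> new ArrayList<>()).add("region-a");
        serverHoldings.computeIfAbsent("server-1", k -> new ArrayList<>()).add("region-b");
        System.out.println(serverHoldings); // {server-1=[region-a, region-b]}
      }
    }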

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index eb96f97..e6b60d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -131,29 +131,25 @@ public class ServerManager {
    * The last flushed sequence id for a region.
    */
   private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
-    new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
   /**
    * The last flushed sequence id for a store in a region.
    */
   private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
-    storeFlushedSequenceIdsByRegion =
-    new ConcurrentSkipListMap<byte[], ConcurrentNavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);
+    storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
   /** Map of registered servers to their current load */
-  private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers =
-    new ConcurrentSkipListMap<ServerName, ServerLoad>();
+  private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers = new ConcurrentSkipListMap<>();
 
   /**
    * Map of admin interfaces per registered regionserver; these interfaces we use to control
    * regionservers out on the cluster
    */
-  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
-    new HashMap<ServerName, AdminService.BlockingInterface>();
+  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins = new HashMap<>();
 
   /** List of region servers that should not get any more new regions. */
-  private final ArrayList<ServerName> drainingServers =
-    new ArrayList<ServerName>();
+  private final ArrayList<ServerName> drainingServers = new ArrayList<>();
 
   private final MasterServices master;
   private final ClusterConnection connection;
@@ -182,7 +178,7 @@ public class ServerManager {
    * So this is a set of region servers known to be dead but not submitted to
    * ServerShutdownHandler for processing yet.
    */
-  private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();
+  private Set<ServerName> queuedDeadServers = new HashSet<>();
 
   /**
    * Set of region servers which are dead and submitted to ServerShutdownHandler to process but not
@@ -199,11 +195,10 @@ public class ServerManager {
    * is currently in startup mode. In this case, the dead server will be parked in this set
    * temporarily.
    */
-  private Map<ServerName, Boolean> requeuedDeadServers
-    = new ConcurrentHashMap<ServerName, Boolean>();
+  private Map<ServerName, Boolean> requeuedDeadServers = new ConcurrentHashMap<>();
 
   /** Listeners that are called on server events. */
-  private List<ServerListener> listeners = new CopyOnWriteArrayList<ServerListener>();
+  private List<ServerListener> listeners = new CopyOnWriteArrayList<>();
 
   /**
    * Constructor.
@@ -1111,7 +1106,7 @@ public class ServerManager {
   public List<ServerName> getOnlineServersList() {
     // TODO: optimize the load balancer call so we don't need to make a new list
     // TODO: FIX. THIS IS A POPULAR CALL.
-    return new ArrayList<ServerName>(this.onlineServers.keySet());
+    return new ArrayList<>(this.onlineServers.keySet());
   }
 
   /**
@@ -1139,14 +1134,14 @@ public class ServerManager {
    * @return A copy of the internal list of draining servers.
    */
   public List<ServerName> getDrainingServersList() {
-    return new ArrayList<ServerName>(this.drainingServers);
+    return new ArrayList<>(this.drainingServers);
   }
 
   /**
    * @return A copy of the internal set of deadNotExpired servers.
    */
   Set<ServerName> getDeadNotExpiredServers() {
-    return new HashSet<ServerName>(this.queuedDeadServers);
+    return new HashSet<>(this.queuedDeadServers);
   }
 
   /**
@@ -1287,11 +1282,9 @@ public class ServerManager {
       LOG.warn("Attempting to send favored nodes update rpc to server " + server.toString()
           + " failed because no RPC connection found to this server");
     } else {
-      List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos =
-          new ArrayList<Pair<HRegionInfo, List<ServerName>>>();
+      List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
       for (Entry<HRegionInfo, List<ServerName>> entry : favoredNodes.entrySet()) {
-        regionUpdateInfos.add(new Pair<HRegionInfo, List<ServerName>>(entry.getKey(),
-          entry.getValue()));
+        regionUpdateInfos.add(new Pair<>(entry.getKey(), entry.getValue()));
       }
       UpdateFavoredNodesRequest request =
         RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
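
Worth noting for hunks like the Pair change above: when the creation supplies
constructor arguments, the compiler can infer the type arguments from those
arguments alone, so new Pair<>(entry.getKey(), entry.getValue()) resolves to
Pair<HRegionInfo, List<ServerName>> even in argument position. A
self-contained sketch using java.util's SimpleEntry in place of HBase's Pair
(hypothetical names, not part of this patch):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map.Entry;

    public class ArgumentInferenceSketch {
      public static void main(String[] args) {
        List<Entry<String, Integer>> updates = new ArrayList<>();
        // Inferred as SimpleEntry<String, Integer> from the constructor
        // arguments, then checked against add()'s declared parameter type.
        updates.add(new SimpleEntry<>("region-a", 1));
        System.out.println(updates); // [region-a=1]
      }
    }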

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index 8fedb40..6e477bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -80,19 +80,19 @@ public class SnapshotOfRegionAssignmentFromMeta {
   private final boolean excludeOfflinedSplitParents;
 
   public SnapshotOfRegionAssignmentFromMeta(Connection connection) {
-    this(connection, new HashSet<TableName>(), false);
+    this(connection, new HashSet<>(), false);
   }
 
   public SnapshotOfRegionAssignmentFromMeta(Connection connection, Set<TableName> disabledTables,
       boolean excludeOfflinedSplitParents) {
     this.connection = connection;
-    tableToRegionMap = new HashMap<TableName, List<HRegionInfo>>();
-    regionToRegionServerMap = new HashMap<HRegionInfo, ServerName>();
-    currentRSToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
-    primaryRSToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
-    secondaryRSToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
-    teritiaryRSToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
-    regionNameToRegionInfoMap = new TreeMap<String, HRegionInfo>();
+    tableToRegionMap = new HashMap<>();
+    regionToRegionServerMap = new HashMap<>();
+    currentRSToRegionMap = new HashMap<>();
+    primaryRSToRegionMap = new HashMap<>();
+    secondaryRSToRegionMap = new HashMap<>();
+    teritiaryRSToRegionMap = new HashMap<>();
+    regionNameToRegionInfoMap = new TreeMap<>();
     existingAssignmentPlan = new FavoredNodesPlan();
     this.disabledTables = disabledTables;
     this.excludeOfflinedSplitParents = excludeOfflinedSplitParents;
@@ -180,7 +180,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
     TableName tableName = regionInfo.getTable();
     List<HRegionInfo> regionList = tableToRegionMap.get(tableName);
     if (regionList == null) {
-      regionList = new ArrayList<HRegionInfo>();
+      regionList = new ArrayList<>();
     }
     // Add the current region info into the tableToRegionMap
     regionList.add(regionInfo);
@@ -196,7 +196,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
     // Process the region server to region map
     List<HRegionInfo> regionList = currentRSToRegionMap.get(server);
     if (regionList == null) {
-      regionList = new ArrayList<HRegionInfo>();
+      regionList = new ArrayList<>();
     }
     regionList.add(regionInfo);
     currentRSToRegionMap.put(server, regionList);
@@ -206,7 +206,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
     // Process the region server to region map
     List<HRegionInfo> regionList = primaryRSToRegionMap.get(server);
     if (regionList == null) {
-      regionList = new ArrayList<HRegionInfo>();
+      regionList = new ArrayList<>();
     }
     regionList.add(regionInfo);
     primaryRSToRegionMap.put(server, regionList);
@@ -216,7 +216,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
     // Process the region server to region map
     List<HRegionInfo> regionList = secondaryRSToRegionMap.get(server);
     if (regionList == null) {
-      regionList = new ArrayList<HRegionInfo>();
+      regionList = new ArrayList<>();
     }
     regionList.add(regionInfo);
     secondaryRSToRegionMap.put(server, regionList);
@@ -226,7 +226,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
     // Process the region server to region map
     List<HRegionInfo> regionList = teritiaryRSToRegionMap.get(server);
     if (regionList == null) {
-      regionList = new ArrayList<HRegionInfo>();
+      regionList = new ArrayList<>();
     }
     regionList.add(regionInfo);
     teritiaryRSToRegionMap.put(server, regionList);
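
The this(connection, new HashSet<>(), false) change above is the trickier
diamond case: with no constructor arguments to infer from, the compiler must
take the type arguments from the invocation's target, here the delegated
constructor's Set<TableName> parameter. That target typing landed in Java 8;
Java 7 would infer HashSet<Object> in this position and reject the call. A
minimal sketch (hypothetical names, not part of this patch):

    import java.util.HashSet;
    import java.util.Set;

    public class DelegationSketch {
      private final Set<String> disabledTables;

      public DelegationSketch() {
        // Inferred as HashSet<String> from the target constructor's
        // declared parameter type; this needs Java 8 target typing.
        this(new HashSet<>());
      }

      public DelegationSketch(Set<String> disabledTables) {
        this.disabledTables = disabledTables;
      }

      public static void main(String[] args) {
        System.out.println(new DelegationSketch().disabledTables); // []
      }
    }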

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 20fef35..7017d29 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -118,7 +118,7 @@ public class SplitLogManager {
   protected final ReentrantLock recoveringRegionLock = new ReentrantLock();
 
   @VisibleForTesting
-  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<String, Task>();
+  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
   private TimeoutMonitor timeoutMonitor;
 
   private volatile Set<ServerName> deadWorkers = null;
@@ -176,7 +176,7 @@ public class SplitLogManager {
   public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
       final PathFilter filter)
       throws IOException {
-    List<FileStatus> fileStatus = new ArrayList<FileStatus>();
+    List<FileStatus> fileStatus = new ArrayList<>();
     for (Path logDir : logDirs) {
       final FileSystem fs = logDir.getFileSystem(conf);
       if (!fs.exists(logDir)) {
@@ -201,7 +201,7 @@ public class SplitLogManager {
    * @throws IOException
    */
   public long splitLogDistributed(final Path logDir) throws IOException {
-    List<Path> logDirs = new ArrayList<Path>();
+    List<Path> logDirs = new ArrayList<>();
     logDirs.add(logDir);
     return splitLogDistributed(logDirs);
   }
@@ -218,7 +218,7 @@ public class SplitLogManager {
     if (logDirs.isEmpty()) {
       return 0;
     }
-    Set<ServerName> serverNames = new HashSet<ServerName>();
+    Set<ServerName> serverNames = new HashSet<>();
     for (Path logDir : logDirs) {
       try {
         ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logDir);
@@ -398,7 +398,7 @@ public class SplitLogManager {
     }
     if (serverNames == null || serverNames.isEmpty()) return;
 
-    Set<String> recoveredServerNameSet = new HashSet<String>();
+    Set<String> recoveredServerNameSet = new HashSet<>();
     for (ServerName tmpServerName : serverNames) {
       recoveredServerNameSet.add(tmpServerName.getServerName());
     }
@@ -410,8 +410,7 @@ public class SplitLogManager {
     } catch (IOException e) {
       LOG.warn("removeRecoveringRegions got exception. Will retry", e);
       if (serverNames != null && !serverNames.isEmpty()) {
-        this.failedRecoveringRegionDeletions.add(new Pair<Set<ServerName>, Boolean>(serverNames,
-            isMetaRecovery));
+        this.failedRecoveringRegionDeletions.add(new Pair<>(serverNames, isMetaRecovery));
       }
     } finally {
       this.recoveringRegionLock.unlock();
@@ -426,7 +425,7 @@ public class SplitLogManager {
    */
   void removeStaleRecoveringRegions(final Set<ServerName> failedServers) throws IOException,
       InterruptedIOException {
-    Set<String> knownFailedServers = new HashSet<String>();
+    Set<String> knownFailedServers = new HashSet<>();
     if (failedServers != null) {
       for (ServerName tmpServerName : failedServers) {
         knownFailedServers.add(tmpServerName.getServerName());
@@ -519,7 +518,7 @@ public class SplitLogManager {
     // to reason about concurrency. Makes it easier to retry.
     synchronized (deadWorkersLock) {
       if (deadWorkers == null) {
-        deadWorkers = new HashSet<ServerName>(100);
+        deadWorkers = new HashSet<>(100);
       }
       deadWorkers.add(workerName);
     }
@@ -529,7 +528,7 @@ public class SplitLogManager {
   void handleDeadWorkers(Set<ServerName> serverNames) {
     synchronized (deadWorkersLock) {
       if (deadWorkers == null) {
-        deadWorkers = new HashSet<ServerName>(100);
+        deadWorkers = new HashSet<>(100);
       }
       deadWorkers.addAll(serverNames);
     }
@@ -749,7 +748,7 @@ public class SplitLogManager {
         getSplitLogManagerCoordination().getDetails().getFailedDeletions();
       // Retry previously failed deletes
       if (failedDeletions.size() > 0) {
-        List<String> tmpPaths = new ArrayList<String>(failedDeletions);
+        List<String> tmpPaths = new ArrayList<>(failedDeletions);
         for (String tmpPath : tmpPaths) {
           // deleteNode is an async call
           getSplitLogManagerCoordination().deleteTask(tmpPath);
@@ -766,7 +765,7 @@ public class SplitLogManager {
         // inside the function there are more checks before GC'ing anything
         if (!failedRecoveringRegionDeletions.isEmpty()) {
           List<Pair<Set<ServerName>, Boolean>> previouslyFailedDeletions =
-              new ArrayList<Pair<Set<ServerName>, Boolean>>(failedRecoveringRegionDeletions);
+              new ArrayList<>(failedRecoveringRegionDeletions);
           failedRecoveringRegionDeletions.removeAll(previouslyFailedDeletions);
           for (Pair<Set<ServerName>, Boolean> failedDeletion : previouslyFailedDeletions) {
             removeRecoveringRegions(failedDeletion.getFirst(), failedDeletion.getSecond());