Posted to commits@hbase.apache.org by sy...@apache.org on 2017/03/10 22:09:58 UTC

[18/50] [abbrv] hbase git commit: HBASE-17532 Replaced explicit type with diamond operator

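The change applied throughout this patch is mechanical: wherever a generic type is instantiated with explicit type arguments that the compiler can infer from the declaration, the arguments are dropped in favor of the Java 7+ diamond operator. A minimal before/after sketch of the pattern, using hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    public class DiamondExample {  // hypothetical class, for illustration only
      public static void main(String[] args) {
        // Pre-Java-7 style: type arguments repeated on the right-hand side.
        Map<String, Integer> before = new HashMap<String, Integer>();

        // Diamond operator: the compiler infers <String, Integer> from the
        // declared type. Identical semantics and bytecode, less noise.
        Map<String, Integer> after = new HashMap<>();

        before.put("a", 1);
        after.put("a", 1);
        System.out.println(before.equals(after)); // prints: true
      }
    }

Inference works even when constructor arguments are present, so calls such as new ConcurrentHashMap<>(128, 0.75f, 32) in the hunks below keep their capacity, load-factor, and concurrency-level arguments while dropping only the type arguments.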
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index a100a15..c2ca3eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -89,8 +89,7 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, HTableDescriptor> cache =
-    new ConcurrentHashMap<TableName, HTableDescriptor>();
+  private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<>();
 
   /**
    * Table descriptor for <code>hbase:meta</code> catalog table
@@ -271,7 +270,7 @@ public class FSTableDescriptors implements TableDescriptors {
   @Override
   public Map<String, HTableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, HTableDescriptor> tds = new TreeMap<String, HTableDescriptor>();
+    Map<String, HTableDescriptor> tds = new TreeMap<>();
 
     if (fsvisited && usecache) {
       for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
@@ -307,7 +306,7 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   @Override
   public Map<String, HTableDescriptor> getAll() throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, HTableDescriptor> htds = new TreeMap<>();
     Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
     for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
         .entrySet()) {
@@ -323,7 +322,7 @@ public class FSTableDescriptors implements TableDescriptors {
   @Override
   public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, HTableDescriptor> htds = new TreeMap<>();
     List<Path> tableDirs =
         FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
     for (Path d: tableDirs) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 84b3436..c78ba06 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -1240,7 +1240,7 @@ public abstract class FSUtils {
   public static Map<String, Integer> getTableFragmentation(
     final FileSystem fs, final Path hbaseRootDir)
   throws IOException {
-    Map<String, Integer> frags = new HashMap<String, Integer>();
+    Map<String, Integer> frags = new HashMap<>();
     int cfCountTotal = 0;
     int cfFragTotal = 0;
     PathFilter regionFilter = new RegionDirFilter(fs);
@@ -1434,7 +1434,7 @@ public abstract class FSUtils {
 
   public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
       throws IOException {
-    List<Path> tableDirs = new LinkedList<Path>();
+    List<Path> tableDirs = new LinkedList<>();
 
     for(FileStatus status :
         fs.globStatus(new Path(rootdir,
@@ -1455,7 +1455,7 @@ public abstract class FSUtils {
       throws IOException {
     // presumes any directory under hbase.rootdir is a table
     FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
-    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
+    List<Path> tabledirs = new ArrayList<>(dirs.length);
     for (FileStatus dir: dirs) {
       tabledirs.add(dir.getPath());
     }
@@ -1511,9 +1511,9 @@ public abstract class FSUtils {
     // assumes we are in a table dir.
     List<FileStatus> rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
     if (rds == null) {
-      return new ArrayList<Path>();
+      return new ArrayList<>();
     }
-    List<Path> regionDirs = new ArrayList<Path>(rds.size());
+    List<Path> regionDirs = new ArrayList<>(rds.size());
     for (FileStatus rdfs: rds) {
       Path rdPath = rdfs.getPath();
       regionDirs.add(rdPath);
@@ -1563,7 +1563,7 @@ public abstract class FSUtils {
   public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
     // assumes we are in a region dir.
     FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
-    List<Path> familyDirs = new ArrayList<Path>(fds.length);
+    List<Path> familyDirs = new ArrayList<>(fds.length);
     for (FileStatus fdfs: fds) {
       Path fdPath = fdfs.getPath();
       familyDirs.add(fdPath);
@@ -1574,9 +1574,9 @@ public abstract class FSUtils {
   public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
     List<FileStatus> fds = listStatusWithStatusFilter(fs, familyDir, new ReferenceFileFilter(fs));
     if (fds == null) {
-      return new ArrayList<Path>();
+      return new ArrayList<>();
     }
-    List<Path> referenceFiles = new ArrayList<Path>(fds.size());
+    List<Path> referenceFiles = new ArrayList<>(fds.size());
     for (FileStatus fdfs: fds) {
       Path fdPath = fdfs.getPath();
       referenceFiles.add(fdPath);
@@ -1709,14 +1709,14 @@ public abstract class FSUtils {
       ExecutorService executor, final ErrorReporter errors) throws IOException, InterruptedException {
 
     final Map<String, Path> finalResultMap =
-        resultMap == null ? new ConcurrentHashMap<String, Path>(128, 0.75f, 32) : resultMap;
+        resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap;
 
     // only include the directory paths to tables
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
     // should be regions.
     final FamilyDirFilter familyFilter = new FamilyDirFilter(fs);
-    final Vector<Exception> exceptions = new Vector<Exception>();
+    final Vector<Exception> exceptions = new Vector<>();
 
     try {
       List<FileStatus> regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
@@ -1724,7 +1724,7 @@ public abstract class FSUtils {
         return finalResultMap;
       }
 
-      final List<Future<?>> futures = new ArrayList<Future<?>>(regionDirs.size());
+      final List<Future<?>> futures = new ArrayList<>(regionDirs.size());
 
       for (FileStatus regionDir : regionDirs) {
         if (null != errors) {
@@ -1740,7 +1740,7 @@ public abstract class FSUtils {
           @Override
           public void run() {
             try {
-              HashMap<String,Path> regionStoreFileMap = new HashMap<String, Path>();
+              HashMap<String,Path> regionStoreFileMap = new HashMap<>();
               List<FileStatus> familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter);
               if (familyDirs == null) {
                 if (!fs.exists(dd)) {
@@ -1785,7 +1785,7 @@ public abstract class FSUtils {
           Future<?> future = executor.submit(getRegionStoreFileMapCall);
           futures.add(future);
         } else {
-          FutureTask<?> future = new FutureTask<Object>(getRegionStoreFileMapCall, null);
+          FutureTask<?> future = new FutureTask<>(getRegionStoreFileMapCall, null);
           future.run();
           futures.add(future);
         }
@@ -1871,7 +1871,7 @@ public abstract class FSUtils {
     final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter,
     ExecutorService executor, ErrorReporter errors)
   throws IOException, InterruptedException {
-    ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<String, Path>(1024, 0.75f, 32);
+    ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<>(1024, 0.75f, 32);
 
     // if this method looks similar to 'getTableFragmentation' that is because
     // it was borrowed from it.
@@ -1907,7 +1907,7 @@ public abstract class FSUtils {
   public static List<FileStatus> filterFileStatuses(Iterator<FileStatus> input,
       FileStatusFilter filter) {
     if (input == null) return null;
-    ArrayList<FileStatus> results = new ArrayList<FileStatus>();
+    ArrayList<FileStatus> results = new ArrayList<>();
     while (input.hasNext()) {
       FileStatus f = input.next();
       if (filter.accept(f)) {
@@ -2167,8 +2167,7 @@ public abstract class FSUtils {
   public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
       final Configuration conf, final String desiredTable, int threadPoolSize)
       throws IOException {
-    Map<String, Map<String, Float>> regionDegreeLocalityMapping =
-        new ConcurrentHashMap<String, Map<String, Float>>();
+    Map<String, Map<String, Float>> regionDegreeLocalityMapping = new ConcurrentHashMap<>();
     getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
         regionDegreeLocalityMapping);
     return regionDegreeLocalityMapping;
@@ -2253,7 +2252,7 @@ public abstract class FSUtils {
     // run in multiple threads
     ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize,
         threadPoolSize, 60, TimeUnit.SECONDS,
-        new ArrayBlockingQueue<Runnable>(statusList.length));
+        new ArrayBlockingQueue<>(statusList.length));
     try {
       // ignore all file status items that are not of interest
       for (FileStatus regionStatus : statusList) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 4d44187..7b3b25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -256,7 +256,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   // limit checking/fixes to listed tables, if empty attempt to check/fix all
   // hbase:meta are always checked
-  private Set<TableName> tablesIncluded = new HashSet<TableName>();
+  private Set<TableName> tablesIncluded = new HashSet<>();
   private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge
   // maximum number of overlapping regions to sideline
   private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;
@@ -280,9 +280,9 @@ public class HBaseFsck extends Configured implements Closeable {
    * name to HbckInfo structure.  The information contained in HbckInfo is used
    * to detect and correct consistency (hdfs/meta/deployment) problems.
    */
-  private TreeMap<String, HbckInfo> regionInfoMap = new TreeMap<String, HbckInfo>();
+  private TreeMap<String, HbckInfo> regionInfoMap = new TreeMap<>();
   // Empty regioninfo qualifiers in hbase:meta
-  private Set<Result> emptyRegionInfoQualifiers = new HashSet<Result>();
+  private Set<Result> emptyRegionInfoQualifiers = new HashSet<>();
 
   /**
    * This map from Tablename -> TableInfo contains the structures necessary to
@@ -294,22 +294,19 @@ public class HBaseFsck extends Configured implements Closeable {
    * unless checkMetaOnly is specified, in which case, it contains only
    * the meta table
    */
-  private SortedMap<TableName, TableInfo> tablesInfo =
-      new ConcurrentSkipListMap<TableName, TableInfo>();
+  private SortedMap<TableName, TableInfo> tablesInfo = new ConcurrentSkipListMap<>();
 
   /**
    * When initially looking at HDFS, we attempt to find any orphaned data.
    */
   private List<HbckInfo> orphanHdfsDirs = Collections.synchronizedList(new ArrayList<HbckInfo>());
 
-  private Map<TableName, Set<String>> orphanTableDirs =
-      new HashMap<TableName, Set<String>>();
-  private Map<TableName, TableState> tableStates =
-      new HashMap<TableName, TableState>();
+  private Map<TableName, Set<String>> orphanTableDirs = new HashMap<>();
+  private Map<TableName, TableState> tableStates = new HashMap<>();
   private final RetryCounterFactory lockFileRetryCounterFactory;
   private final RetryCounterFactory createZNodeRetryCounterFactory;
 
-  private Map<TableName, Set<String>> skippedRegions = new HashMap<TableName, Set<String>>();
+  private Map<TableName, Set<String>> skippedRegions = new HashMap<>();
 
   private ZooKeeperWatcher zkw = null;
   private String hbckEphemeralNodePath = null;
@@ -431,7 +428,7 @@ public class HBaseFsck extends Configured implements Closeable {
     RetryCounter retryCounter = lockFileRetryCounterFactory.create();
     FileLockCallable callable = new FileLockCallable(retryCounter);
     ExecutorService executor = Executors.newFixedThreadPool(1);
-    FutureTask<FSDataOutputStream> futureTask = new FutureTask<FSDataOutputStream>(callable);
+    FutureTask<FSDataOutputStream> futureTask = new FutureTask<>(callable);
     executor.execute(futureTask);
     final int timeoutInSeconds = getConf().getInt(
       "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);
@@ -977,7 +974,7 @@ public class HBaseFsck extends Configured implements Closeable {
         // expand the range to include the range of all hfiles
         if (orphanRegionRange == null) {
           // first range
-          orphanRegionRange = new Pair<byte[], byte[]>(start, end);
+          orphanRegionRange = new Pair<>(start, end);
         } else {
           // TODO add test
 
@@ -1267,7 +1264,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Collection<HbckInfo> hbckInfos = regionInfoMap.values();
 
     // Parallelized read of .regioninfo files.
-    List<WorkItemHdfsRegionInfo> hbis = new ArrayList<WorkItemHdfsRegionInfo>(hbckInfos.size());
+    List<WorkItemHdfsRegionInfo> hbis = new ArrayList<>(hbckInfos.size());
     List<Future<Void>> hbiFutures;
 
     for (HbckInfo hbi : hbckInfos) {
@@ -1323,7 +1320,7 @@ public class HBaseFsck extends Configured implements Closeable {
             //should only report once for each table
             errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE,
                 "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName);
-            Set<String> columns = new HashSet<String>();
+            Set<String> columns = new HashSet<>();
             orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi));
           }
         }
@@ -1402,7 +1399,7 @@ public class HBaseFsck extends Configured implements Closeable {
   public void fixOrphanTables() throws IOException {
     if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
 
-      List<TableName> tmpList = new ArrayList<TableName>(orphanTableDirs.keySet().size());
+      List<TableName> tmpList = new ArrayList<>(orphanTableDirs.keySet().size());
       tmpList.addAll(orphanTableDirs.keySet());
       HTableDescriptor[] htds = getHTableDescriptors(tmpList);
       Iterator<Entry<TableName, Set<String>>> iter =
@@ -1485,7 +1482,7 @@ public class HBaseFsck extends Configured implements Closeable {
    */
   private ArrayList<Put> generatePuts(
       SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
-    ArrayList<Put> puts = new ArrayList<Put>();
+    ArrayList<Put> puts = new ArrayList<>();
     boolean hasProblems = false;
     for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
       TableName name = e.getKey();
@@ -1936,7 +1933,7 @@ public class HBaseFsck extends Configured implements Closeable {
   void processRegionServers(Collection<ServerName> regionServerList)
     throws IOException, InterruptedException {
 
-    List<WorkItemRegion> workItems = new ArrayList<WorkItemRegion>(regionServerList.size());
+    List<WorkItemRegion> workItems = new ArrayList<>(regionServerList.size());
     List<Future<Void>> workFutures;
 
     // loop to contact each region server in parallel
@@ -1966,8 +1963,7 @@ public class HBaseFsck extends Configured implements Closeable {
     // Divide the checks in two phases. One for default/primary replicas and another
     // for the non-primary ones. Keeps code cleaner this way.
 
-    List<CheckRegionConsistencyWorkItem> workItems =
-        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
+    List<CheckRegionConsistencyWorkItem> workItems = new ArrayList<>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
         workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
@@ -1979,8 +1975,7 @@ public class HBaseFsck extends Configured implements Closeable {
     setCheckHdfs(false); //replicas don't have any hdfs data
     // Run a pass over the replicas and fix any assignment issues that exist on the currently
     // deployed/undeployed replicas.
-    List<CheckRegionConsistencyWorkItem> replicaWorkItems =
-        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
+    List<CheckRegionConsistencyWorkItem> replicaWorkItems = new ArrayList<>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
         replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
@@ -2065,7 +2060,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private void addSkippedRegion(final HbckInfo hbi) {
     Set<String> skippedRegionNames = skippedRegions.get(hbi.getTableName());
     if (skippedRegionNames == null) {
-      skippedRegionNames = new HashSet<String>();
+      skippedRegionNames = new HashSet<>();
     }
     skippedRegionNames.add(hbi.getRegionNameAsString());
     skippedRegions.put(hbi.getTableName(), skippedRegionNames);
@@ -2570,7 +2565,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * @throws IOException
    */
   SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
-    tablesInfo = new TreeMap<TableName,TableInfo> ();
+    tablesInfo = new TreeMap<>();
     LOG.debug("There are " + regionInfoMap.size() + " region info entries");
     for (HbckInfo hbi : regionInfoMap.values()) {
       // Check only valid, working regions
@@ -2753,16 +2748,16 @@ public class HBaseFsck extends Configured implements Closeable {
     TreeSet <ServerName> deployedOn;
 
     // backwards regions
-    final List<HbckInfo> backwards = new ArrayList<HbckInfo>();
+    final List<HbckInfo> backwards = new ArrayList<>();
 
     // sidelined big overlapped regions
-    final Map<Path, HbckInfo> sidelinedRegions = new HashMap<Path, HbckInfo>();
+    final Map<Path, HbckInfo> sidelinedRegions = new HashMap<>();
 
     // region split calculator
-    final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<HbckInfo>(cmp);
+    final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<>(cmp);
 
     // Histogram of different HTableDescriptors found.  Ideally there is only one!
-    final Set<HTableDescriptor> htds = new HashSet<HTableDescriptor>();
+    final Set<HTableDescriptor> htds = new HashSet<>();
 
     // key = start split, values = set of splits in problem group
     final Multimap<byte[], HbckInfo> overlapGroups =
@@ -2773,7 +2768,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
     TableInfo(TableName name) {
       this.tableName = name;
-      deployedOn = new TreeSet <ServerName>();
+      deployedOn = new TreeSet <>();
     }
 
     /**
@@ -2829,7 +2824,7 @@ public class HBaseFsck extends Configured implements Closeable {
     public synchronized ImmutableList<HRegionInfo> getRegionsFromMeta() {
       // lazy loaded, synchronized to ensure a single load
       if (regionsFromMeta == null) {
-        List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+        List<HRegionInfo> regions = new ArrayList<>();
         for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) {
           if (tableName.equals(h.getTableName())) {
             if (h.metaEntry != null) {
@@ -3031,7 +3026,7 @@ public class HBaseFsck extends Configured implements Closeable {
         Pair<byte[], byte[]> range = null;
         for (HbckInfo hi : overlap) {
           if (range == null) {
-            range = new Pair<byte[], byte[]>(hi.getStartKey(), hi.getEndKey());
+            range = new Pair<>(hi.getStartKey(), hi.getEndKey());
           } else {
             if (RegionSplitCalculator.BYTES_COMPARATOR
                 .compare(hi.getStartKey(), range.getFirst()) < 0) {
@@ -3200,7 +3195,7 @@ public class HBaseFsck extends Configured implements Closeable {
           overlapGroups.putAll(problemKey, ranges);
 
           // record errors
-          ArrayList<HbckInfo> subRange = new ArrayList<HbckInfo>(ranges);
+          ArrayList<HbckInfo> subRange = new ArrayList<>(ranges);
           //  this dumb and n^2 but this shouldn't happen often
           for (HbckInfo r1 : ranges) {
             if (r1.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue;
@@ -3275,7 +3270,7 @@ public class HBaseFsck extends Configured implements Closeable {
         throws IOException {
       // we parallelize overlap handler for the case we have lots of groups to fix.  We can
       // safely assume each group is independent.
-      List<WorkItemOverlapMerge> merges = new ArrayList<WorkItemOverlapMerge>(overlapGroups.size());
+      List<WorkItemOverlapMerge> merges = new ArrayList<>(overlapGroups.size());
       List<Future<Void>> rets;
       for (Collection<HbckInfo> overlap : overlapGroups.asMap().values()) {
         //
@@ -3364,7 +3359,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * @throws IOException if an error is encountered
    */
   HTableDescriptor[] getTables(AtomicInteger numSkipped) {
-    List<TableName> tableNames = new ArrayList<TableName>();
+    List<TableName> tableNames = new ArrayList<>();
     long now = EnvironmentEdgeManager.currentTime();
 
     for (HbckInfo hbi : regionInfoMap.values()) {
@@ -3429,7 +3424,7 @@ public class HBaseFsck extends Configured implements Closeable {
     * @throws InterruptedException
     */
   boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException {
-    Map<Integer, HbckInfo> metaRegions = new HashMap<Integer, HbckInfo>();
+    Map<Integer, HbckInfo> metaRegions = new HashMap<>();
     for (HbckInfo value : regionInfoMap.values()) {
       if (value.metaEntry != null && value.metaEntry.isMetaRegion()) {
         metaRegions.put(value.getReplicaId(), value);
@@ -3442,7 +3437,7 @@ public class HBaseFsck extends Configured implements Closeable {
     // Check the deployed servers. It should be exactly one server for each replica.
     for (int i = 0; i < metaReplication; i++) {
       HbckInfo metaHbckInfo = metaRegions.remove(i);
-      List<ServerName> servers = new ArrayList<ServerName>();
+      List<ServerName> servers = new ArrayList<>();
       if (metaHbckInfo != null) {
         servers = metaHbckInfo.deployedOn;
       }
@@ -3979,10 +3974,10 @@ public class HBaseFsck extends Configured implements Closeable {
     // How frequently calls to progress() will create output
     private static final int progressThreshold = 100;
 
-    Set<TableInfo> errorTables = new HashSet<TableInfo>();
+    Set<TableInfo> errorTables = new HashSet<>();
 
     // for use by unit tests to verify which errors were discovered
-    private ArrayList<ERROR_CODE> errorList = new ArrayList<ERROR_CODE>();
+    private ArrayList<ERROR_CODE> errorList = new ArrayList<>();
 
     @Override
     public void clear() {
@@ -4183,11 +4178,11 @@ public class HBaseFsck extends Configured implements Closeable {
 
     @Override
     public synchronized Void call() throws InterruptedException, ExecutionException {
-      final Vector<Exception> exceptions = new Vector<Exception>();
+      final Vector<Exception> exceptions = new Vector<>();
 
       try {
         final FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
-        final List<Future<?>> futures = new ArrayList<Future<?>>(regionDirs.length);
+        final List<Future<?>> futures = new ArrayList<>(regionDirs.length);
 
         for (final FileStatus regionDir : regionDirs) {
           errors.progress();
@@ -4554,7 +4549,7 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   Set<TableName> getIncludedTables() {
-    return new HashSet<TableName>(tablesIncluded);
+    return new HashSet<>(tablesIncluded);
   }
 
   /**
@@ -4865,7 +4860,7 @@ public class HBaseFsck extends Configured implements Closeable {
         HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles);
         setHFileCorruptionChecker(hfcc); // so we can get result
         Collection<TableName> tables = getIncludedTables();
-        Collection<Path> tableDirs = new ArrayList<Path>();
+        Collection<Path> tableDirs = new ArrayList<>();
         Path rootdir = FSUtils.getRootDir(getConf());
         if (tables.size() > 0) {
           for (TableName t : tables) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index 7f283e6..e5dbae2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -58,8 +58,7 @@ public class IdLock {
     }
   }
 
-  private ConcurrentMap<Long, Entry> map =
-      new ConcurrentHashMap<Long, Entry>();
+  private ConcurrentMap<Long, Entry> map = new ConcurrentHashMap<>();
 
   /**
    * Blocks until the lock corresponding to the given id is acquired.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
index 98ce80d..caf3265 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
@@ -46,8 +46,7 @@ public class IdReadWriteLock {
   private static final int NB_CONCURRENT_LOCKS = 1000;
   // The pool to get entry from, entries are mapped by weak reference to make it able to be
   // garbage-collected asap
-  private final WeakObjectPool<Long, ReentrantReadWriteLock> lockPool =
-      new WeakObjectPool<Long, ReentrantReadWriteLock>(
+  private final WeakObjectPool<Long, ReentrantReadWriteLock> lockPool = new WeakObjectPool<>(
           new WeakObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
             @Override
             public ReentrantReadWriteLock createObject(Long id) {

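Note that the IdReadWriteLock hunk above applies the diamond only to the WeakObjectPool constructor while the anonymous WeakObjectPool.ObjectFactory keeps its explicit type arguments. That is deliberate: on Java 7/8 the diamond operator cannot be used with anonymous inner classes (this became legal only in Java 9, JEP 213). A small sketch of the distinction, using a hypothetical stand-in for the factory interface:

    // Hypothetical stand-in for WeakObjectPool.ObjectFactory, for illustration only.
    interface ObjectFactory<K, V> {
      V createObject(K key);
    }

    class DiamondAndAnonymousClasses {
      // Fine on Java 7+: diamond on an ordinary constructor call.
      java.util.List<String> names = new java.util.ArrayList<>();

      // Explicit type arguments required on Java 7/8; writing
      // 'new ObjectFactory<>() { ... }' compiles only from Java 9 onward.
      ObjectFactory<Long, String> factory = new ObjectFactory<Long, String>() {
        @Override
        public String createObject(Long id) {
          return "entry-" + id;
        }
      };
    }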
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java
index b0bca00..9f4b271 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public abstract class JvmVersion {
-  private static Set<String> BAD_JVM_VERSIONS = new HashSet<String>();
+  private static Set<String> BAD_JVM_VERSIONS = new HashSet<>();
   static {
     BAD_JVM_VERSIONS.add("1.6.0_18");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index f11d38b..d7749c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -128,9 +128,8 @@ public abstract class ModifyRegionUtils {
       final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
-    CompletionService<HRegionInfo> completionService =
-      new ExecutorCompletionService<HRegionInfo>(exec);
-    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
+    List<HRegionInfo> regionInfos = new ArrayList<>();
     for (final HRegionInfo newRegion : newRegions) {
       completionService.submit(new Callable<HRegionInfo>() {
         @Override
@@ -193,8 +192,7 @@ public abstract class ModifyRegionUtils {
    */
   public static void editRegions(final ThreadPoolExecutor exec,
       final Collection<HRegionInfo> regions, final RegionEditTask task) throws IOException {
-    final ExecutorCompletionService<Void> completionService =
-      new ExecutorCompletionService<Void>(exec);
+    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(exec);
     for (final HRegionInfo hri: regions) {
       completionService.submit(new Callable<Void>() {
         @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
index 8cb880d..4721781 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
@@ -111,7 +111,7 @@ public class MunkresAssignment {
     mask = new byte[rows][cols];
     rowsCovered = new boolean[rows];
     colsCovered = new boolean[cols];
-    path = new LinkedList<Pair<Integer, Integer>>();
+    path = new LinkedList<>();
 
     leastInRow = new float[rows];
     leastInRowIndex = new int[rows];
@@ -330,8 +330,7 @@ public class MunkresAssignment {
           // starting from the uncovered primed zero (there is only one). Since
           // we have already found it, save it as the first node in the path.
           path.clear();
-          path.offerLast(new Pair<Integer, Integer>(zero.getFirst(),
-              zero.getSecond()));
+          path.offerLast(new Pair<>(zero.getFirst(), zero.getSecond()));
           return true;
         }
       }
@@ -439,7 +438,7 @@ public class MunkresAssignment {
   private Pair<Integer, Integer> findUncoveredZero() {
     for (int r = 0; r < rows; r++) {
       if (leastInRow[r] == 0) {
-        return new Pair<Integer, Integer>(r, leastInRowIndex[r]);
+        return new Pair<>(r, leastInRowIndex[r]);
       }
     }
     return null;
@@ -476,7 +475,7 @@ public class MunkresAssignment {
   private Pair<Integer, Integer> starInRow(int r) {
     for (int c = 0; c < cols; c++) {
       if (mask[r][c] == STAR) {
-        return new Pair<Integer, Integer>(r, c);
+        return new Pair<>(r, c);
       }
     }
     return null;
@@ -491,7 +490,7 @@ public class MunkresAssignment {
   private Pair<Integer, Integer> starInCol(int c) {
     for (int r = 0; r < rows; r++) {
       if (mask[r][c] == STAR) {
-        return new Pair<Integer, Integer>(r, c);
+        return new Pair<>(r, c);
       }
     }
     return null;
@@ -506,7 +505,7 @@ public class MunkresAssignment {
   private Pair<Integer, Integer> primeInRow(int r) {
     for (int c = 0; c < cols; c++) {
       if (mask[r][c] == PRIME) {
-        return new Pair<Integer, Integer>(r, c);
+        return new Pair<>(r, c);
       }
     }
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 01ee201..ce018da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -397,7 +397,7 @@ public class RegionMover extends AbstractHBaseTool {
     LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using "
         + this.maxthreads + " threads.Ack mode:" + this.ack);
     ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
-    List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
+    List<Future<Boolean>> taskList = new ArrayList<>();
     int counter = 0;
     while (counter < regionsToMove.size()) {
       HRegionInfo region = regionsToMove.get(counter);
@@ -461,7 +461,7 @@ public class RegionMover extends AbstractHBaseTool {
       justification="FB is wrong; its size is read")
   private void unloadRegions(Admin admin, String server, ArrayList<String> regionServers,
       boolean ack, List<HRegionInfo> movedRegions) throws Exception {
-    List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
+    List<HRegionInfo> regionsToMove = new ArrayList<>();// FindBugs: DLS_DEAD_LOCAL_STORE
     regionsToMove = getRegions(this.conf, server);
     if (regionsToMove.isEmpty()) {
       LOG.info("No Regions to move....Quitting now");
@@ -481,7 +481,7 @@ public class RegionMover extends AbstractHBaseTool {
           + regionServers.size() + " servers using " + this.maxthreads + " threads .Ack Mode:"
           + ack);
       ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
-      List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
+      List<Future<Boolean>> taskList = new ArrayList<>();
       int serverIndex = 0;
       while (counter < regionsToMove.size()) {
         if (ack) {
@@ -636,7 +636,7 @@ public class RegionMover extends AbstractHBaseTool {
   }
 
   private List<HRegionInfo> readRegionsFromFile(String filename) throws IOException {
-    List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regions = new ArrayList<>();
     File f = new File(filename);
     if (!f.exists()) {
       return regions;
@@ -758,7 +758,7 @@ public class RegionMover extends AbstractHBaseTool {
    * @return List of servers from the exclude file in format 'hostname:port'.
    */
   private ArrayList<String> readExcludes(String excludeFile) throws IOException {
-    ArrayList<String> excludeServers = new ArrayList<String>();
+    ArrayList<String> excludeServers = new ArrayList<>();
     if (excludeFile == null) {
       return excludeServers;
     } else {
@@ -821,9 +821,8 @@ public class RegionMover extends AbstractHBaseTool {
    * @throws IOException
    */
   private ArrayList<String> getServers(Admin admin) throws IOException {
-    ArrayList<ServerName> serverInfo =
-        new ArrayList<ServerName>(admin.getClusterStatus().getServers());
-    ArrayList<String> regionServers = new ArrayList<String>(serverInfo.size());
+    ArrayList<ServerName> serverInfo = new ArrayList<>(admin.getClusterStatus().getServers());
+    ArrayList<String> regionServers = new ArrayList<>(serverInfo.size());
     for (ServerName server : serverInfo) {
       regionServers.add(server.getServerName());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
index c616a25..8249630 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
@@ -53,7 +53,7 @@ public class RegionSizeCalculator {
   /**
    * Maps each region to its size in bytes.
    * */
-  private final Map<byte[], Long> sizeMap = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+  private final Map<byte[], Long> sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable";
   private static final long MEGABYTE = 1024L * 1024L;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java
index eeef1ae..e07966e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java
@@ -62,7 +62,7 @@ public class RegionSplitCalculator<R extends KeyRange> {
    * Invariant: once populated this has 0 entries if empty or at most n+1 values
    * where n == number of added ranges.
    */
-  private final TreeSet<byte[]> splits = new TreeSet<byte[]>(BYTES_COMPARATOR);
+  private final TreeSet<byte[]> splits = new TreeSet<>(BYTES_COMPARATOR);
 
   /**
    * This is a map from start key to regions with the same start key.
@@ -177,11 +177,11 @@ public class RegionSplitCalculator<R extends KeyRange> {
    */
   public static <R extends KeyRange> List<R>
       findBigRanges(Collection<R> bigOverlap, int count) {
-    List<R> bigRanges = new ArrayList<R>();
+    List<R> bigRanges = new ArrayList<>();
 
     // The key is the count of overlaps,
     // The value is a list of ranges that have that many overlaps
-    TreeMap<Integer, List<R>> overlapRangeMap = new TreeMap<Integer, List<R>>();
+    TreeMap<Integer, List<R>> overlapRangeMap = new TreeMap<>();
     for (R r: bigOverlap) {
       // Calculates the # of overlaps for each region
       // and populates rangeOverlapMap
@@ -206,7 +206,7 @@ public class RegionSplitCalculator<R extends KeyRange> {
         Integer key = Integer.valueOf(overlappedRegions);
         List<R> ranges = overlapRangeMap.get(key);
         if (ranges == null) {
-          ranges = new ArrayList<R>();
+          ranges = new ArrayList<>();
           overlapRangeMap.put(key, ranges);
         }
         ranges.add(r);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index ce1b441..87ff010 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -768,7 +768,7 @@ public class RegionSplitter {
     Path hbDir = FSUtils.getRootDir(conf);
     Path tableDir = FSUtils.getTableDir(hbDir, tableName);
     Path splitFile = new Path(tableDir, "_balancedSplit");
-    return new Pair<Path, Path>(tableDir, splitFile);
+    return new Pair<>(tableDir, splitFile);
   }
 
   static LinkedList<Pair<byte[], byte[]>> getSplits(final Connection connection,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java
index e6b746c..9cc6d5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java
@@ -94,7 +94,7 @@ public abstract class ServerCommandLine extends Configured implements Tool {
   public static void logProcessInfo(Configuration conf) {
     // log environment variables unless asked not to
     if (conf == null || !conf.getBoolean("hbase.envvars.logging.disabled", false)) {
-      Set<String> skipWords = new HashSet<String>(DEFAULT_SKIP_WORDS);
+      Set<String> skipWords = new HashSet<>(DEFAULT_SKIP_WORDS);
       if (conf != null) {
         String[] confSkipWords = conf.getStrings("hbase.envvars.logging.skipwords");
         if (confSkipWords != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
index 62163bf..05e0f49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
@@ -49,15 +49,15 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
   private volatile SortedSet<E> internalSet;
 
   public SortedCopyOnWriteSet() {
-    this.internalSet = new TreeSet<E>();
+    this.internalSet = new TreeSet<>();
   }
 
   public SortedCopyOnWriteSet(Collection<? extends E> c) {
-    this.internalSet = new TreeSet<E>(c);
+    this.internalSet = new TreeSet<>(c);
   }
 
   public SortedCopyOnWriteSet(Comparator<? super E> comparator) {
-    this.internalSet = new TreeSet<E>(comparator);
+    this.internalSet = new TreeSet<>(comparator);
   }
 
   @Override
@@ -92,7 +92,7 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
 
   @Override
   public synchronized boolean add(E e) {
-    SortedSet<E> newSet = new TreeSet<E>(internalSet);
+    SortedSet<E> newSet = new TreeSet<>(internalSet);
     boolean added = newSet.add(e);
     internalSet = newSet;
     return added;
@@ -100,7 +100,7 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
 
   @Override
   public synchronized boolean remove(Object o) {
-    SortedSet<E> newSet = new TreeSet<E>(internalSet);
+    SortedSet<E> newSet = new TreeSet<>(internalSet);
     boolean removed = newSet.remove(o);
     internalSet = newSet;
     return removed;
@@ -113,7 +113,7 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
 
   @Override
   public synchronized boolean addAll(Collection<? extends E> c) {
-    SortedSet<E> newSet = new TreeSet<E>(internalSet);
+    SortedSet<E> newSet = new TreeSet<>(internalSet);
     boolean changed = newSet.addAll(c);
     internalSet = newSet;
     return changed;
@@ -121,7 +121,7 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
 
   @Override
   public synchronized boolean retainAll(Collection<?> c) {
-    SortedSet<E> newSet = new TreeSet<E>(internalSet);
+    SortedSet<E> newSet = new TreeSet<>(internalSet);
     boolean changed = newSet.retainAll(c);
     internalSet = newSet;
     return changed;
@@ -129,7 +129,7 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
 
   @Override
   public synchronized boolean removeAll(Collection<?> c) {
-    SortedSet<E> newSet = new TreeSet<E>(internalSet);
+    SortedSet<E> newSet = new TreeSet<>(internalSet);
     boolean changed = newSet.removeAll(c);
     internalSet = newSet;
     return changed;
@@ -139,9 +139,9 @@ public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
   public synchronized void clear() {
     Comparator<? super E> comparator = internalSet.comparator();
     if (comparator != null) {
-      internalSet = new TreeSet<E>(comparator);
+      internalSet = new TreeSet<>(comparator);
     } else {
-      internalSet = new TreeSet<E>();
+      internalSet = new TreeSet<>();
     }
   }
 

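The SortedCopyOnWriteSet hunks above all share one copy-on-write idiom: each mutator copies the current set into a fresh TreeSet, mutates the copy, and publishes it by reassigning the volatile field, so readers always see a complete, consistent snapshot. A minimal sketch of that idiom, mirroring the structure visible in the diff (assuming E is Comparable, as the no-argument TreeSet requires):

    import java.util.SortedSet;
    import java.util.TreeSet;

    class CopyOnWriteSetSketch<E> {
      // volatile publication: a read always observes a fully-built set.
      private volatile SortedSet<E> internalSet = new TreeSet<>();

      // synchronized so concurrent writers don't overwrite each other's copies.
      synchronized boolean add(E e) {
        SortedSet<E> newSet = new TreeSet<>(internalSet); // copy
        boolean added = newSet.add(e);                    // mutate the copy
        internalSet = newSet;                             // publish atomically
        return added;
      }

      boolean contains(E e) {
        return internalSet.contains(e); // lock-free read of the current snapshot
      }
    }

The trade-off is a full copy per write in exchange for lock-free, tear-free reads and iteration.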
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
index 39f1f41..3f5576e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
@@ -118,7 +118,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean add(E e) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     boolean changed = newList.add(e);
     if (changed) {
       Collections.sort(newList, comparator);
@@ -129,7 +129,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean remove(Object o) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     // Removals in ArrayList won't break sorting
     boolean changed = newList.remove(o);
     list = Collections.unmodifiableList(newList);
@@ -143,7 +143,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean addAll(Collection<? extends E> c) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     boolean changed = newList.addAll(c);
     if (changed) {
       Collections.sort(newList, comparator);
@@ -154,7 +154,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean addAll(int index, Collection<? extends E> c) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     boolean changed = newList.addAll(index, c);
     if (changed) {
       Collections.sort(newList, comparator);
@@ -165,7 +165,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean removeAll(Collection<?> c) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     // Removals in ArrayList won't break sorting
     boolean changed = newList.removeAll(c);
     list = Collections.unmodifiableList(newList);
@@ -174,7 +174,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized boolean retainAll(Collection<?> c) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     // Removals in ArrayList won't break sorting
     boolean changed = newList.retainAll(c);
     list = Collections.unmodifiableList(newList);
@@ -193,7 +193,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized E set(int index, E element) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     E result = newList.set(index, element);
     Collections.sort(list, comparator);
     list = Collections.unmodifiableList(newList);
@@ -202,7 +202,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized void add(int index, E element) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     newList.add(index, element);
     Collections.sort(list, comparator);
     list = Collections.unmodifiableList(newList);
@@ -210,7 +210,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
 
   @Override
   public synchronized E remove(int index) {
-    ArrayList<E> newList = new ArrayList<E>(list);
+    ArrayList<E> newList = new ArrayList<>(list);
     // Removals in ArrayList won't break sorting
     E result = newList.remove(index);
     list = Collections.unmodifiableList(newList);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 820da7a..82200bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -66,14 +66,14 @@ public class HFileCorruptionChecker {
   final FileSystem fs;
   final CacheConfig cacheConf;
   final ExecutorService executor;
-  final Set<Path> corrupted = new ConcurrentSkipListSet<Path>();
-  final Set<Path> failures = new ConcurrentSkipListSet<Path>();
-  final Set<Path> quarantined = new ConcurrentSkipListSet<Path>();
-  final Set<Path> missing = new ConcurrentSkipListSet<Path>();
-  final Set<Path> corruptedMobFiles = new ConcurrentSkipListSet<Path>();
-  final Set<Path> failureMobFiles = new ConcurrentSkipListSet<Path>();
-  final Set<Path> missedMobFiles = new ConcurrentSkipListSet<Path>();
-  final Set<Path> quarantinedMobFiles = new ConcurrentSkipListSet<Path>();
+  final Set<Path> corrupted = new ConcurrentSkipListSet<>();
+  final Set<Path> failures = new ConcurrentSkipListSet<>();
+  final Set<Path> quarantined = new ConcurrentSkipListSet<>();
+  final Set<Path> missing = new ConcurrentSkipListSet<>();
+  final Set<Path> corruptedMobFiles = new ConcurrentSkipListSet<>();
+  final Set<Path> failureMobFiles = new ConcurrentSkipListSet<>();
+  final Set<Path> missedMobFiles = new ConcurrentSkipListSet<>();
+  final Set<Path> quarantinedMobFiles = new ConcurrentSkipListSet<>();
   final boolean inQuarantineMode;
   final AtomicInteger hfilesChecked = new AtomicInteger();
   final AtomicInteger mobFilesChecked = new AtomicInteger();
@@ -343,7 +343,7 @@ public class HFileCorruptionChecker {
     }
 
     // Parallelize check at the region dir level
-    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>(rds.size() + 1);
+    List<RegionDirChecker> rdcs = new ArrayList<>(rds.size() + 1);
     List<Future<Void>> rdFutures;
 
     for (FileStatus rdFs : rds) {
@@ -451,14 +451,14 @@ public class HFileCorruptionChecker {
    * @return the set of check failure file paths after checkTables is called.
    */
   public Collection<Path> getFailures() {
-    return new HashSet<Path>(failures);
+    return new HashSet<>(failures);
   }
 
   /**
    * @return the set of corrupted file paths after checkTables is called.
    */
   public Collection<Path> getCorrupted() {
-    return new HashSet<Path>(corrupted);
+    return new HashSet<>(corrupted);
   }
 
   /**
@@ -472,7 +472,7 @@ public class HFileCorruptionChecker {
    * @return the set of successfully quarantined paths after checkTables is called.
    */
   public Collection<Path> getQuarantined() {
-    return new HashSet<Path>(quarantined);
+    return new HashSet<>(quarantined);
   }
 
   /**
@@ -480,21 +480,21 @@ public class HFileCorruptionChecker {
    *  compaction or flushes.
    */
   public Collection<Path> getMissing() {
-    return new HashSet<Path>(missing);
+    return new HashSet<>(missing);
   }
 
   /**
    * @return the set of check failure mob file paths after checkTables is called.
    */
   public Collection<Path> getFailureMobFiles() {
-    return new HashSet<Path>(failureMobFiles);
+    return new HashSet<>(failureMobFiles);
   }
 
   /**
    * @return the set of corrupted mob file paths after checkTables is called.
    */
   public Collection<Path> getCorruptedMobFiles() {
-    return new HashSet<Path>(corruptedMobFiles);
+    return new HashSet<>(corruptedMobFiles);
   }
 
   /**
@@ -508,7 +508,7 @@ public class HFileCorruptionChecker {
    * @return the set of successfully quarantined paths after checkTables is called.
    */
   public Collection<Path> getQuarantinedMobFiles() {
-    return new HashSet<Path>(quarantinedMobFiles);
+    return new HashSet<>(quarantinedMobFiles);
   }
 
   /**
@@ -516,7 +516,7 @@ public class HFileCorruptionChecker {
    *  deletion/moves from compaction.
    */
   public Collection<Path> getMissedMobFiles() {
-    return new HashSet<Path>(missedMobFiles);
+    return new HashSet<>(missedMobFiles);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index bdd319d..9dd85d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -115,7 +115,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
     if (wal == null) {
       return Collections.emptyList();
     }
-    List<WAL> wals = new ArrayList<WAL>(1);
+    List<WAL> wals = new ArrayList<>(1);
     wals.add(wal);
     return wals;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
index 5b32347..81b1c00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
@@ -38,8 +38,7 @@ public class BoundedGroupingStrategy implements RegionGroupingStrategy{
   static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups";
   static final int DEFAULT_NUM_REGION_GROUPS = 2;
 
-  private ConcurrentHashMap<String, String> groupNameCache =
-      new ConcurrentHashMap<String, String>();
+  private ConcurrentHashMap<String, String> groupNameCache = new ConcurrentHashMap<>();
   private AtomicInteger counter = new AtomicInteger(0);
   private String[] groupNames;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 5bee923..b442f07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -68,7 +68,7 @@ class DisabledWALProvider implements WALProvider {
 
   @Override
   public List<WAL> getWALs() throws IOException {
-    List<WAL> wals = new ArrayList<WAL>(1);
+    List<WAL> wals = new ArrayList<>(1);
     wals.add(disabled);
     return wals;
   }
@@ -89,8 +89,7 @@ class DisabledWALProvider implements WALProvider {
   }
 
   private static class DisabledWAL implements WAL {
-    protected final List<WALActionsListener> listeners =
-        new CopyOnWriteArrayList<WALActionsListener>();
+    protected final List<WALActionsListener> listeners = new CopyOnWriteArrayList<>();
     protected final Path path;
     protected final WALCoprocessorHost coprocessorHost;
     protected final AtomicBoolean closed = new AtomicBoolean(false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index 25e70d7..dee36e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -171,7 +171,7 @@ public class RegionGroupingProvider implements WALProvider {
 
   @Override
   public List<WAL> getWALs() throws IOException {
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<>();
     for (WALProvider provider : cached.values()) {
       wals.addAll(provider.getWALs());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index abdc20c..114715f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -93,7 +93,7 @@ public class WALFactory {
   // The meta updates are written to a different wal. If this
   // regionserver holds meta regions, then this ref will be non-null.
   // lazily initialized; most RegionServers don't deal with META
-  final AtomicReference<WALProvider> metaProvider = new AtomicReference<WALProvider>();
+  final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
 
   /**
    * Configuration-specified WAL Reader used when a custom reader is requested
@@ -368,7 +368,7 @@ public class WALFactory {
   // untangle the reliance on state in the filesystem. They rely on singleton
   // WALFactory that just provides Reader / Writers.
   // For now, first Configuration object wins. Practically this just impacts the reader/writer class
-  private static final AtomicReference<WALFactory> singleton = new AtomicReference<WALFactory>();
+  private static final AtomicReference<WALFactory> singleton = new AtomicReference<>();
   private static final String SINGLETON_ID = WALFactory.class.getName();
   
   // public only for FSHLog

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 276ab36..9a8003a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -192,19 +192,19 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
 
   public WALKey() {
     init(null, null, 0L, HConstants.LATEST_TIMESTAMP,
-        new ArrayList<UUID>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
+        new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
   }
 
   public WALKey(final NavigableMap<byte[], Integer> replicationScope) {
     init(null, null, 0L, HConstants.LATEST_TIMESTAMP,
-        new ArrayList<UUID>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope);
+        new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope);
   }
 
   @VisibleForTesting
   public WALKey(final byte[] encodedRegionName, final TableName tablename,
                 long logSeqNum,
       final long now, UUID clusterId) {
-    List<UUID> clusterIds = new ArrayList<UUID>(1);
+    List<UUID> clusterIds = new ArrayList<>(1);
     clusterIds.add(clusterId);
     init(encodedRegionName, tablename, logSeqNum, now, clusterIds,
         HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
@@ -543,7 +543,7 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
    * @return a Map containing data from this key
    */
   public Map<String, Object> toStringMap() {
-    Map<String, Object> stringMap = new HashMap<String, Object>();
+    Map<String, Object> stringMap = new HashMap<>();
     stringMap.put("table", tablename);
     stringMap.put("region", Bytes.toStringBinary(encodedRegionName));
     stringMap.put("sequence", getSequenceId());
@@ -684,7 +684,7 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
     }
     this.replicationScope = null;
     if (walKey.getScopesCount() > 0) {
-      this.replicationScope = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+      this.replicationScope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       for (FamilyScope scope : walKey.getScopesList()) {
         byte[] family = (compressionContext == null) ? scope.getFamily().toByteArray() :
           uncompressor.uncompress(scope.getFamily(), compressionContext.familyDict);

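Note that in the WALKey constructors above the diamond sits in an argument position (init(..., new ArrayList<>(), ...)) rather than on the right-hand side of an assignment. That form relies on the generalized target-type inference added in Java 8 (JEP 101): javac 7 would infer ArrayList<Object> there and reject the call. A small sketch under that assumption; the init method below is a hypothetical stand-in, not the HBase one:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.UUID;

    public class TargetTypingExample {
      // Hypothetical stand-in for WALKey.init(...); only the parameter type matters.
      static void init(List<UUID> clusterIds) {
        System.out.println("clusterIds: " + clusterIds);
      }

      public static void main(String[] args) {
        // Java 8 infers <UUID> from init's parameter type.
        // (javac 7 would infer ArrayList<Object> here and fail to compile.)
        init(new ArrayList<>());
      }
    }
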
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index a6fd85f..37473e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -283,10 +283,10 @@ public class WALPrettyPrinter {
         if (region != null && !((String) txn.get("region")).equals(region))
           continue;
         // initialize list into which we will store atomic actions
-        List<Map> actions = new ArrayList<Map>();
+        List<Map> actions = new ArrayList<>();
         for (Cell cell : edit.getCells()) {
           // add atomic operation to txn
-          Map<String, Object> op = new HashMap<String, Object>(toStringMap(cell));
+          Map<String, Object> op = new HashMap<>(toStringMap(cell));
           if (outputValues) op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell)));
           // check row output filter
           if (row == null || ((String) op.get("row")).equals(row)) {
@@ -328,7 +328,7 @@ public class WALPrettyPrinter {
   }
 
   private static Map<String, Object> toStringMap(Cell cell) {
-    Map<String, Object> stringMap = new HashMap<String, Object>();
+    Map<String, Object> stringMap = new HashMap<>();
     stringMap.put("row",
         Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
     stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
@@ -339,7 +339,7 @@ public class WALPrettyPrinter {
     stringMap.put("timestamp", cell.getTimestamp());
     stringMap.put("vlen", cell.getValueLength());
     if (cell.getTagsLength() > 0) {
-      List<String> tagsString = new ArrayList<String>();
+      List<String> tagsString = new ArrayList<>();
       Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
       while (tagsIterator.hasNext()) {
         Tag tag = tagsIterator.next();

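Two variations show up in WALPrettyPrinter: the diamond combined with a copy constructor (new HashMap<>(toStringMap(cell))), and the diamond where the target's element type is itself generic, or even raw as in List<Map> actions = new ArrayList<>(). Both are plain inference from the declared type. A self-contained sketch (names illustrative; the raw Map is parameterized here to keep the sketch warning-free):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CopyConstructorExample {
      public static void main(String[] args) {
        Map<String, Object> op = new HashMap<>();
        op.put("row", "r1");

        // Diamond with a copy constructor: <String, Object> comes from the
        // declared type; the argument only needs to be a compatible Map.
        Map<String, Object> copy = new HashMap<>(op);
        copy.put("value", 42);

        // Diamond where the element type is itself parameterized.
        List<Map<String, Object>> actions = new ArrayList<>();
        actions.add(copy);
        System.out.println(actions);
      }
    }
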
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 2cf2c6b..d87c71b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -158,11 +158,10 @@ public class WALSplitter {
   protected boolean distributedLogReplay;
 
   // Map encodedRegionName -> lastFlushedSequenceId
-  protected Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<String, Long>();
+  protected Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<>();
 
   // Map encodedRegionName -> maxSeqIdInStores
-  protected Map<String, Map<byte[], Long>> regionMaxSeqIdInStores =
-      new ConcurrentHashMap<String, Map<byte[], Long>>();
+  protected Map<String, Map<byte[], Long>> regionMaxSeqIdInStores = new ConcurrentHashMap<>();
 
   // Failed region server that the wal file being split belongs to
   protected String failedServerName = "";
@@ -245,7 +244,7 @@ public class WALSplitter {
       FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
     final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
         Collections.singletonList(logDir), null);
-    List<Path> splits = new ArrayList<Path>();
+    List<Path> splits = new ArrayList<>();
     if (logfiles != null && logfiles.length > 0) {
       for (FileStatus logfile: logfiles) {
         WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, null,
@@ -331,7 +330,7 @@ public class WALSplitter {
             }
           } else if (sequenceIdChecker != null) {
             RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
-            Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+            Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
             for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
               maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(),
                 storeSeqId.getSequenceId());
@@ -447,8 +446,8 @@ public class WALSplitter {
 
   private static void finishSplitLogFile(Path rootdir, Path oldLogDir,
       Path logPath, Configuration conf) throws IOException {
-    List<Path> processedLogs = new ArrayList<Path>();
-    List<Path> corruptedLogs = new ArrayList<Path>();
+    List<Path> processedLogs = new ArrayList<>();
+    List<Path> corruptedLogs = new ArrayList<>();
     FileSystem fs;
     fs = rootdir.getFileSystem(conf);
     if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
@@ -614,7 +613,7 @@ public class WALSplitter {
    */
   public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
       final Path regiondir) throws IOException {
-    NavigableSet<Path> filesSorted = new TreeSet<Path>();
+    NavigableSet<Path> filesSorted = new TreeSet<>();
     Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
     if (!fs.exists(editsdir))
       return filesSorted;
@@ -872,7 +871,7 @@ public class WALSplitter {
   public static class PipelineController {
     // If an exception is thrown by one of the other threads, it will be
     // stored here.
-    AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
+    AtomicReference<Throwable> thrown = new AtomicReference<>();
 
     // Wait/notify for when data has been produced by the writer thread,
     // consumed by the reader thread, or an exception occurred
@@ -906,13 +905,12 @@ public class WALSplitter {
   public static class EntryBuffers {
     PipelineController controller;
 
-    Map<byte[], RegionEntryBuffer> buffers =
-      new TreeMap<byte[], RegionEntryBuffer>(Bytes.BYTES_COMPARATOR);
+    Map<byte[], RegionEntryBuffer> buffers = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
     /* Track which regions are currently in the middle of writing. We don't allow
        an IO thread to pick up bytes from a region if we're already writing
        data for that region in a different IO thread. */
-    Set<byte[]> currentlyWriting = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    Set<byte[]> currentlyWriting = new TreeSet<>(Bytes.BYTES_COMPARATOR);
 
     long totalBuffered = 0;
     long maxHeapUsage;
@@ -1027,7 +1025,7 @@ public class WALSplitter {
     RegionEntryBuffer(TableName tableName, byte[] region) {
       this.tableName = tableName;
       this.encodedRegionName = region;
-      this.entryBuffer = new LinkedList<Entry>();
+      this.entryBuffer = new LinkedList<>();
     }
 
     long appendEntry(Entry entry) {
@@ -1148,7 +1146,7 @@ public class WALSplitter {
 
     /* Set of regions which we've decided should not output edits */
     protected final Set<byte[]> blacklistedRegions = Collections
-        .synchronizedSet(new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR));
+        .synchronizedSet(new TreeSet<>(Bytes.BYTES_COMPARATOR));
 
     protected boolean closeAndCleanCompleted = false;
 
@@ -1360,7 +1358,7 @@ public class WALSplitter {
     private List<Path> close() throws IOException {
       Preconditions.checkState(!closeAndCleanCompleted);
 
-      final List<Path> paths = new ArrayList<Path>();
+      final List<Path> paths = new ArrayList<>();
       final List<IOException> thrown = Lists.newArrayList();
       ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L,
         TimeUnit.SECONDS, new ThreadFactory() {
@@ -1372,8 +1370,7 @@ public class WALSplitter {
             return t;
           }
         });
-      CompletionService<Void> completionService =
-        new ExecutorCompletionService<Void>(closeThreadPool);
+      CompletionService<Void> completionService = new ExecutorCompletionService<>(closeThreadPool);
       for (final Map.Entry<byte[], SinkWriter> writersEntry : writers.entrySet()) {
         if (LOG.isTraceEnabled()) {
           LOG.trace("Submitting close of " + ((WriterAndPath)writersEntry.getValue()).p);
@@ -1558,7 +1555,7 @@ public class WALSplitter {
       }
       // Create the array list for the cells that aren't filtered.
       // We make the assumption that most cells will be kept.
-      ArrayList<Cell> keptCells = new ArrayList<Cell>(logEntry.getEdit().getCells().size());
+      ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
       for (Cell cell : logEntry.getEdit().getCells()) {
         if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
           keptCells.add(cell);
@@ -1639,7 +1636,7 @@ public class WALSplitter {
      */
     @Override
     public Map<byte[], Long> getOutputCounts() {
-      TreeMap<byte[], Long> ret = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte[], Long> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       synchronized (writers) {
         for (Map.Entry<byte[], SinkWriter> entry : writers.entrySet()) {
           ret.put(entry.getKey(), entry.getValue().editsWritten);
@@ -1705,8 +1702,7 @@ public class WALSplitter {
     private final Set<String> recoveredRegions = Collections.synchronizedSet(new HashSet<String>());
     private final Map<String, RegionServerWriter> writers = new ConcurrentHashMap<>();
     // online encoded region name -> region location map
-    private final Map<String, HRegionLocation> onlineRegions =
-        new ConcurrentHashMap<String, HRegionLocation>();
+    private final Map<String, HRegionLocation> onlineRegions = new ConcurrentHashMap<>();
 
     private final Map<TableName, ClusterConnection> tableNameToHConnectionMap = Collections
         .synchronizedMap(new TreeMap<TableName, ClusterConnection>());
@@ -1859,7 +1855,7 @@ public class WALSplitter {
                 + encodeRegionNameStr);
             lastFlushedSequenceIds.put(encodeRegionNameStr, Long.MAX_VALUE);
             if (nonExistentTables == null) {
-              nonExistentTables = new TreeSet<TableName>();
+              nonExistentTables = new TreeSet<>();
             }
             nonExistentTables.add(table);
             this.skippedEdits.incrementAndGet();
@@ -1906,7 +1902,7 @@ public class WALSplitter {
                 Collections.synchronizedList(new ArrayList<Pair<HRegionLocation, Entry>>());
             serverToBufferQueueMap.put(locKey, queue);
           }
-          queue.add(new Pair<HRegionLocation, Entry>(loc, entry));
+          queue.add(new Pair<>(loc, entry));
         }
         // store regions we have recovered so far
         addToRecoveredRegions(loc.getRegionInfo().getEncodedName());
@@ -1957,7 +1953,7 @@ public class WALSplitter {
               loc.getRegionInfo().getEncodedName());
         if (ids != null) {
           lastFlushedSequenceId = ids.getLastFlushedSequenceId();
-          Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+          Map<byte[], Long> storeIds = new TreeMap<>(Bytes.BYTES_COMPARATOR);
           List<StoreSequenceId> maxSeqIdInStores = ids.getStoreSequenceIdList();
           for (StoreSequenceId id : maxSeqIdInStores) {
             storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
@@ -2102,7 +2098,7 @@ public class WALSplitter {
         if (hasEditsInDisablingOrDisabledTables) {
           splits = logRecoveredEditsOutputSink.finishWritingAndClose();
         } else {
-          splits = new ArrayList<Path>();
+          splits = new ArrayList<>();
         }
       // returns an empty list in order to keep the interface the same as the old way
         return splits;
@@ -2316,13 +2312,13 @@ public class WALSplitter {
 
     if (entry == null) {
       // return an empty list
-      return new ArrayList<MutationReplay>();
+      return new ArrayList<>();
     }
 
     long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ?
       entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
     int count = entry.getAssociatedCellCount();
-    List<MutationReplay> mutations = new ArrayList<MutationReplay>();
+    List<MutationReplay> mutations = new ArrayList<>();
     Cell previousCell = null;
     Mutation m = null;
     WALKey key = null;
@@ -2369,7 +2365,7 @@ public class WALSplitter {
     if (logEntry != null) {
       org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto =
           entry.getKey();
-      List<UUID> clusterIds = new ArrayList<UUID>(walKeyProto.getClusterIdsCount());
+      List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
       for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
         clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
       }

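WALSplitter also exercises inference through a nested generic call: in Collections.synchronizedSet(new TreeSet<>(Bytes.BYTES_COMPARATOR)), the element type is recovered from the comparator's type and the declared target type, so the explicit <byte[]> can go. A self-contained sketch of the same shape, with a minimal comparator standing in for HBase's Bytes.BYTES_COMPARATOR (field and class names illustrative):

    import java.util.Collections;
    import java.util.Comparator;
    import java.util.Set;
    import java.util.TreeSet;

    public class NestedInferenceExample {
      // The element type byte[] flows from the comparator and the declared
      // field type through synchronizedSet(...) into the TreeSet constructor.
      private final Set<byte[]> blacklisted =
          Collections.synchronizedSet(new TreeSet<>(new ByteArrayComparator()));

      // Minimal lexicographic comparator so the sketch compiles without HBase.
      static class ByteArrayComparator implements Comparator<byte[]> {
        @Override
        public int compare(byte[] a, byte[] b) {
          int n = Math.min(a.length, b.length);
          for (int i = 0; i < n; i++) {
            int d = (a[i] & 0xff) - (b[i] & 0xff);
            if (d != 0) return d;
          }
          return a.length - b.length;
        }
      }

      public static void main(String[] args) {
        NestedInferenceExample e = new NestedInferenceExample();
        e.blacklisted.add(new byte[] { 1 });
        System.out.println(e.blacklisted.size());
      }
    }
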
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java
index e6d3b7f..32e0862 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java
@@ -50,7 +50,7 @@ public class DrainingServerTracker extends ZooKeeperListener {
   private static final Log LOG = LogFactory.getLog(DrainingServerTracker.class);
 
   private ServerManager serverManager;
-  private final NavigableSet<ServerName> drainingServers = new TreeSet<ServerName>();
+  private final NavigableSet<ServerName> drainingServers = new TreeSet<>();
   private Abortable abortable;
 
   public DrainingServerTracker(ZooKeeperWatcher watcher,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
index ff73073..7dea269 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
@@ -80,9 +80,9 @@ public class MiniZooKeeperCluster {
     this.started = false;
     this.configuration = configuration;
     activeZKServerIndex = -1;
-    zooKeeperServers = new ArrayList<ZooKeeperServer>();
-    clientPortList = new ArrayList<Integer>();
-    standaloneServerFactoryList = new ArrayList<NIOServerCnxnFactory>();
+    zooKeeperServers = new ArrayList<>();
+    clientPortList = new ArrayList<>();
+    standaloneServerFactoryList = new ArrayList<>();
     connectionTimeout = configuration.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster",
       DEFAULT_CONNECTION_TIMEOUT);
   }

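One more wrinkle, visible in the MiniZooKeeperCluster hunk above: the assignments happen in the constructor while the fields are declared elsewhere, and the diamond is still resolved against the field's declared type, since any assignment context supplies a target type. A minimal sketch (field name illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class FieldAssignmentExample {
      // Declared once; this declaration is what drives inference below.
      private final List<Integer> clientPorts;

      FieldAssignmentExample() {
        // The diamond is resolved against the field's declared type,
        // even though declaration and assignment are separated.
        clientPorts = new ArrayList<>();
      }

      public static void main(String[] args) {
        FieldAssignmentExample e = new FieldAssignmentExample();
        e.clientPorts.add(2181);
        System.out.println(e.clientPorts);
      }
    }
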
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
index 19d2d00..69cd233 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
@@ -48,8 +48,7 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class RegionServerTracker extends ZooKeeperListener {
   private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);
-  private NavigableMap<ServerName, RegionServerInfo> regionServers = 
-      new TreeMap<ServerName, RegionServerInfo>();
+  private NavigableMap<ServerName, RegionServerInfo> regionServers = new TreeMap<>();
   private ServerManager serverManager;
   private Server server;
 
@@ -154,7 +153,7 @@ public class RegionServerTracker extends ZooKeeperListener {
    */
   public List<ServerName> getOnlineServers() {
     synchronized (this.regionServers) {
-      return new ArrayList<ServerName>(this.regionServers.keySet());
+      return new ArrayList<>(this.regionServers.keySet());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java
index 455cfd2..b96924d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java
@@ -36,7 +36,7 @@ import java.util.List;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
 public class ZKServerTool {
   public static ServerName[] readZKNodes(Configuration conf) {
-    List<ServerName> hosts = new LinkedList<ServerName>();
+    List<ServerName> hosts = new LinkedList<>();
     String quorum = conf.get(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
 
     String[] values = quorum.split(",");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 897dad7..0f8a289 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -372,11 +372,11 @@ if ( fqtn != null ) {
   long totalStoreFileCount = 0;
   long totalMemSize = 0;
   String urlRegionServer = null;
-  Map<ServerName, Integer> regDistribution = new TreeMap<ServerName, Integer>();
-  Map<ServerName, Integer> primaryRegDistribution = new TreeMap<ServerName, Integer>();
+  Map<ServerName, Integer> regDistribution = new TreeMap<>();
+  Map<ServerName, Integer> primaryRegDistribution = new TreeMap<>();
   List<HRegionLocation> regions = r.getAllRegionLocations();
-  Map<HRegionInfo, RegionLoad> regionsToLoad = new LinkedHashMap<HRegionInfo, RegionLoad>();
-  Map<HRegionInfo, ServerName> regionsToServer = new LinkedHashMap<HRegionInfo, ServerName>();
+  Map<HRegionInfo, RegionLoad> regionsToLoad = new LinkedHashMap<>();
+  Map<HRegionInfo, ServerName> regionsToServer = new LinkedHashMap<>();
   for (HRegionLocation hriEntry : regions) {
     HRegionInfo regionInfo = hriEntry.getRegionInfo();
     ServerName addr = hriEntry.getServerName();
@@ -448,7 +448,7 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
 </tr>
 
 <%
-  List<Map.Entry<HRegionInfo, RegionLoad>> entryList = new ArrayList<Map.Entry<HRegionInfo, RegionLoad>>(regionsToLoad.entrySet());
+  List<Map.Entry<HRegionInfo, RegionLoad>> entryList = new ArrayList<>(regionsToLoad.entrySet());
   if(sortKey != null) {
     if (sortKey.equals("readrequest")) {
       Collections.sort(entryList,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 724761a..47170b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2287,7 +2287,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
 
   public int countRows(final InternalScanner scanner) throws IOException {
     int scannedCount = 0;
-    List<Cell> results = new ArrayList<Cell>();
+    List<Cell> results = new ArrayList<>();
     boolean hasMore = true;
     while (hasMore) {
       hasMore = scanner.next(results);
@@ -2367,7 +2367,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   throws IOException {
     Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
-    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
+    List<HRegionInfo> newRegions = new ArrayList<>(startKeys.length);
     MetaTableAccessor
         .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
     // add custom ones
@@ -2426,7 +2426,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public List<byte[]> getMetaTableRows() throws IOException {
     // TODO: Redo using MetaTableAccessor class
     Table t = getConnection().getTable(TableName.META_TABLE_NAME);
-    List<byte[]> rows = new ArrayList<byte[]>();
+    List<byte[]> rows = new ArrayList<>();
     ResultScanner s = t.getScanner(new Scan());
     for (Result result : s) {
       LOG.info("getMetaTableRows: row -> " +
@@ -2446,7 +2446,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
     // TODO: Redo using MetaTableAccessor.
     Table t = getConnection().getTable(TableName.META_TABLE_NAME);
-    List<byte[]> rows = new ArrayList<byte[]>();
+    List<byte[]> rows = new ArrayList<>();
     ResultScanner s = t.getScanner(new Scan());
     for (Result result : s) {
       HRegionInfo info = MetaTableAccessor.getHRegionInfo(result);
@@ -3219,7 +3219,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
 
   public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
       throws IOException {
-    NavigableSet<String> online = new TreeSet<String>();
+    NavigableSet<String> online = new TreeSet<>();
     for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
       try {
         for (HRegionInfo region :
@@ -3391,7 +3391,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         // readpoint 0.
         0);
 
-    List<Cell> result = new ArrayList<Cell>();
+    List<Cell> result = new ArrayList<>();
     scanner.next(result);
     if (!result.isEmpty()) {
       // verify that we are on the row we want:
@@ -3601,7 +3601,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     private static final int MAX_RANDOM_PORT = 0xfffe;
 
     /** A set of ports that have been claimed using {@link #randomFreePort()}. */
-    private final Set<Integer> takenRandomPorts = new HashSet<Integer>();
+    private final Set<Integer> takenRandomPorts = new HashSet<>();
 
     private final Random random;
     private final AvailablePortChecker portChecker;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
index e5334bf..268f79c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
@@ -41,7 +41,7 @@ public class HTestConst {
   public static final byte[] DEFAULT_CF_BYTES = Bytes.toBytes(DEFAULT_CF_STR);
 
   public static final Set<String> DEFAULT_CF_STR_SET =
-      Collections.unmodifiableSet(new HashSet<String>(
+      Collections.unmodifiableSet(new HashSet<>(
           Arrays.asList(new String[] { DEFAULT_CF_STR })));
 
   public static final String DEFAULT_ROW_STR = "MyTestRow";

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
index 42e2811..9a1515b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
@@ -68,7 +68,7 @@ public class MetaMockingUtil {
    */
   public static Result getMetaTableRowResult(HRegionInfo region, final ServerName sn,
       HRegionInfo splita, HRegionInfo splitb) throws IOException {
-    List<Cell> kvs = new ArrayList<Cell>();
+    List<Cell> kvs = new ArrayList<>();
     if (region != null) {
       kvs.add(new KeyValue(
         region.getRegionName(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index a8887d4..55529c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -672,7 +672,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   }
 
   public List<HRegion> getRegions(TableName tableName) {
-    List<HRegion> ret = new ArrayList<HRegion>();
+    List<HRegion> ret = new ArrayList<>();
     for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getOnlineRegionsLocalContext()) {
@@ -770,7 +770,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   }
 
   public List<HRegion> findRegionsForTable(TableName tableName) {
-    ArrayList<HRegion> ret = new ArrayList<HRegion>();
+    ArrayList<HRegion> ret = new ArrayList<>();
     for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getOnlineRegions(tableName)) {