Posted to commits@hbase.apache.org by sy...@apache.org on 2017/03/10 22:10:01 UTC

[21/50] [abbrv] hbase git commit: HBASE-17532 Replaced explicit type with diamond operator
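
For readers skimming the patch: HBASE-17532 is a mechanical cleanup across the code base. Wherever the type arguments of a constructor call are inferable from the target type, the explicit arguments are dropped in favor of the diamond operator ("<>", available since Java 7). A minimal before/after sketch (illustrative only, not lifted from the patch):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class DiamondBasics {
      // Before: type arguments spelled out on both sides of the assignment.
      NavigableMap<String, Integer> before =
          new TreeMap<String, Integer>(String.CASE_INSENSITIVE_ORDER);

      // After: the compiler infers <String, Integer> from the declared type;
      // constructor arguments such as a Comparator do not get in the way.
      NavigableMap<String, Integer> after =
          new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
    }

Both fields have the identical compile-time type, so the change is purely syntactic.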

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index 0735629..91c0050 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -70,7 +70,7 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
       LOG.debug("Commit " + writers.size() + " writers, maxSeqId=" + maxSeqId
           + ", majorCompaction=" + majorCompaction);
     }
-    List<Path> paths = new ArrayList<Path>();
+    List<Path> paths = new ArrayList<>();
     for (StoreFileWriter writer : writers) {
       if (writer == null) {
         continue;
@@ -87,7 +87,7 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
    * Close all writers without throwing any exceptions. This is used when compaction failed usually.
    */
   public List<Path> abortWriters() {
-    List<Path> paths = new ArrayList<Path>();
+    List<Path> paths = new ArrayList<>();
     for (StoreFileWriter writer : writers()) {
       try {
         if (writer != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
index c492180..6c98c1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
@@ -92,10 +92,8 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction {
   };
 
   // Some caches for helping performance
-  private final Map<String, Class<? extends Message>> argumentToClassMap =
-    new HashMap<String, Class<? extends Message>>();
-  private final Map<String, Map<Class<? extends Message>, Method>> methodMap =
-    new HashMap<String, Map<Class<? extends Message>, Method>>();
+  private final Map<String, Class<? extends Message>> argumentToClassMap = new HashMap<>();
+  private final Map<String, Map<Class<? extends Message>, Method>> methodMap = new HashMap<>();
 
   private final float scanVirtualTimeWeight;
 
@@ -121,7 +119,7 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction {
    */
   public AnnotationReadingPriorityFunction(final RSRpcServices rpcServices,
       Class<? extends RSRpcServices> clz) {
-    Map<String,Integer> qosMap = new HashMap<String,Integer>();
+    Map<String,Integer> qosMap = new HashMap<>();
     for (Method m : clz.getMethods()) {
       QosPriority p = m.getAnnotation(QosPriority.class);
       if (p != null) {
@@ -137,8 +135,8 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction {
     this.rpcServices = rpcServices;
     this.annotatedQos = qosMap;
     if (methodMap.get("getRegion") == null) {
-      methodMap.put("hasRegion", new HashMap<Class<? extends Message>, Method>());
-      methodMap.put("getRegion", new HashMap<Class<? extends Message>, Method>());
+      methodMap.put("hasRegion", new HashMap<>());
+      methodMap.put("getRegion", new HashMap<>());
     }
     for (Class<? extends Message> cls : knownArgumentClasses) {
       argumentToClassMap.put(cls.getName(), cls);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
index be2bd91..0b1ab18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
@@ -57,7 +57,7 @@ implements RowProcessor<S,T> {
 
   @Override
   public List<UUID> getClusterIds() {
-    return new ArrayList<UUID>();
+    return new ArrayList<>();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
index 531bf66..9f08712 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
@@ -48,7 +48,7 @@ public class CellSet implements NavigableSet<Cell>  {
   private final NavigableMap<Cell, Cell> delegatee; ///
 
   CellSet(final CellComparator c) {
-    this.delegatee = new ConcurrentSkipListMap<Cell, Cell>(c);
+    this.delegatee = new ConcurrentSkipListMap<>(c);
   }
 
   CellSet(final NavigableMap<Cell, Cell> m) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 6870445..eba984a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -285,14 +285,14 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
     // not a special compaction request, so make our own list
     List<CompactionRequest> ret = null;
     if (requests == null) {
-      ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
+      ret = selectNow ? new ArrayList<>(r.getStores().size()) : null;
       for (Store s : r.getStores()) {
         CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user);
         if (selectNow) ret.add(cr);
       }
     } else {
       Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
-      ret = new ArrayList<CompactionRequest>(requests.size());
+      ret = new ArrayList<>(requests.size());
       for (Pair<CompactionRequest, Store> pair : requests) {
         ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user));
       }
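
One hunk above is worth a second look: "ret = selectNow ? new ArrayList<>(...) : null" puts the diamond inside a conditional expression. Under Java 8 (the baseline for HBase by this point) a conditional is a poly expression, so the target type List<CompactionRequest> flows through it and drives the inference; Java 7 typed conditionals standalone, and a diamond in that position would generally not have compiled. A small sketch, assuming a Java 8 compiler:

    import java.util.ArrayList;
    import java.util.List;

    public class DiamondInTernary {
      static List<String> make(boolean selectNow, int size) {
        // The return target type List<String> reaches the diamond through the
        // conditional, so ArrayList<String> is inferred, not ArrayList<Object>.
        return selectNow ? new ArrayList<>(size) : null;
      }
    }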

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 312e9fc..511bd80 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -322,7 +322,7 @@ public class CompactingMemStore extends AbstractMemStore {
     // The list of elements in pipeline + the active element + the snapshot segment
     // TODO : This will change when the snapshot is made of more than one element
     // The order is the Segment ordinal
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(order+1);
+    List<KeyValueScanner> list = new ArrayList<>(order+1);
     list.add(this.active.getScanner(readPt, order + 1));
     for (Segment item : pipelineList) {
       list.add(item.getScanner(readPt, order));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index b037c89..bea3e7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -248,7 +248,7 @@ public class CompactionTool extends Configured implements Tool {
      */
     @Override
     public List<InputSplit> getSplits(JobContext job) throws IOException {
-      List<InputSplit> splits = new ArrayList<InputSplit>();
+      List<InputSplit> splits = new ArrayList<>();
       List<FileStatus> files = listStatus(job);
 
       Text key = new Text();
@@ -301,7 +301,7 @@ public class CompactionTool extends Configured implements Tool {
     public static void createInputFile(final FileSystem fs, final Path path,
         final Set<Path> toCompactDirs) throws IOException {
       // Extract the list of store dirs
-      List<Path> storeDirs = new LinkedList<Path>();
+      List<Path> storeDirs = new LinkedList<>();
       for (Path compactDir: toCompactDirs) {
         if (isFamilyDir(fs, compactDir)) {
           storeDirs.add(compactDir);
@@ -389,7 +389,7 @@ public class CompactionTool extends Configured implements Tool {
 
   @Override
   public int run(String[] args) throws Exception {
-    Set<Path> toCompactDirs = new HashSet<Path>();
+    Set<Path> toCompactDirs = new HashSet<>();
     boolean compactOnce = false;
     boolean major = false;
     boolean mapred = false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 30d17fb..73556bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -65,7 +65,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
 
   @VisibleForTesting
   public List<Segment> getAllSegments() {
-    return new LinkedList<Segment>(segments);
+    return new LinkedList<>(segments);
   }
 
   public int getNumOfSegments() {
@@ -150,7 +150,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
    */
   public KeyValueScanner getScanner(long readPoint, long order) {
     KeyValueScanner resultScanner;
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
+    List<KeyValueScanner> list = new ArrayList<>(segments.size());
     for (ImmutableSegment s : segments) {
       list.add(s.getScanner(readPoint, order));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
index 2cea92f..e682597 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
@@ -34,8 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredMultiFileWriter extends AbstractMultiFileWriter {
 
-  private final NavigableMap<Long, StoreFileWriter> lowerBoundary2Writer
-    = new TreeMap<Long, StoreFileWriter>();
+  private final NavigableMap<Long, StoreFileWriter> lowerBoundary2Writer = new TreeMap<>();
 
   private final boolean needEmptyFile;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index a31c2c3..4757e1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -129,7 +129,7 @@ public class DefaultMemStore extends AbstractMemStore {
    * Scanners are ordered from 0 (oldest) to newest in increasing order.
    */
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(2);
+    List<KeyValueScanner> list = new ArrayList<>(2);
     list.add(this.active.getScanner(readPt, 1));
     list.add(this.snapshot.getScanner(readPt, 0));
     return Collections.<KeyValueScanner> singletonList(
@@ -138,7 +138,7 @@ public class DefaultMemStore extends AbstractMemStore {
 
   @Override
   protected List<Segment> getSegments() throws IOException {
-    List<Segment> list = new ArrayList<Segment>(2);
+    List<Segment> list = new ArrayList<>(2);
     list.add(this.active);
     list.add(this.snapshot);
     return list;
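
Note that "Collections.<KeyValueScanner> singletonList(...)" in the DefaultMemStore context above is left alone: the diamond applies only to class instance creation, while an explicit type witness on a generic method call is a separate construct outside this patch's scope. A sketch of the distinction (the names here are illustrative, not from HBase):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class WitnessVsDiamond {
      static List<CharSequence> wrap(String s) {
        List<CharSequence> result = new ArrayList<>();              // constructor: diamond
        result.addAll(Collections.<CharSequence>singletonList(s));  // method: type witness
        return result;
      }
    }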

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
index db0ad01..c37ae99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
@@ -91,7 +91,7 @@ class DefaultStoreFileManager implements StoreFileManager {
 
   @Override
   public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
-    ArrayList<StoreFile> newFiles = new ArrayList<StoreFile>(storefiles);
+    ArrayList<StoreFile> newFiles = new ArrayList<>(storefiles);
     newFiles.addAll(sfs);
     sortAndSetStoreFiles(newFiles);
   }
@@ -106,7 +106,7 @@ class DefaultStoreFileManager implements StoreFileManager {
   @Override
   public Collection<StoreFile> clearCompactedFiles() {
     List<StoreFile> result = compactedfiles;
-    compactedfiles = new ArrayList<StoreFile>();
+    compactedfiles = new ArrayList<>();
     return result;
   }
 
@@ -126,10 +126,10 @@ class DefaultStoreFileManager implements StoreFileManager {
     sortAndSetStoreFiles(newStoreFiles);
     ArrayList<StoreFile> updatedCompactedfiles = null;
     if (this.compactedfiles != null) {
-      updatedCompactedfiles = new ArrayList<StoreFile>(this.compactedfiles);
+      updatedCompactedfiles = new ArrayList<>(this.compactedfiles);
       updatedCompactedfiles.addAll(newCompactedfiles);
     } else {
-      updatedCompactedfiles = new ArrayList<StoreFile>(newCompactedfiles);
+      updatedCompactedfiles = new ArrayList<>(newCompactedfiles);
     }
     markCompactedAway(newCompactedfiles);
     this.compactedfiles = sortCompactedfiles(updatedCompactedfiles);
@@ -149,7 +149,7 @@ class DefaultStoreFileManager implements StoreFileManager {
   public void removeCompactedFiles(Collection<StoreFile> removedCompactedfiles) throws IOException {
     ArrayList<StoreFile> updatedCompactedfiles = null;
     if (this.compactedfiles != null) {
-      updatedCompactedfiles = new ArrayList<StoreFile>(this.compactedfiles);
+      updatedCompactedfiles = new ArrayList<>(this.compactedfiles);
       updatedCompactedfiles.removeAll(removedCompactedfiles);
       this.compactedfiles = sortCompactedfiles(updatedCompactedfiles);
     }
@@ -157,7 +157,7 @@ class DefaultStoreFileManager implements StoreFileManager {
 
   @Override
   public final Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
-    return new ArrayList<StoreFile>(Lists.reverse(this.storefiles)).iterator();
+    return new ArrayList<>(Lists.reverse(this.storefiles)).iterator();
   }
 
   @Override
@@ -204,7 +204,7 @@ class DefaultStoreFileManager implements StoreFileManager {
         LOG.info("Found an expired store file: " + sf.getPath()
             + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs);
         if (expiredStoreFiles == null) {
-          expiredStoreFiles = new ArrayList<StoreFile>();
+          expiredStoreFiles = new ArrayList<>();
         }
         expiredStoreFiles.add(sf);
       }
@@ -220,7 +220,7 @@ class DefaultStoreFileManager implements StoreFileManager {
   private List<StoreFile> sortCompactedfiles(List<StoreFile> storefiles) {
     // Sorting may not be really needed here for the compacted files?
     Collections.sort(storefiles, storeFileComparator);
-    return new ArrayList<StoreFile>(storefiles);
+    return new ArrayList<>(storefiles);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index 93837b7..8cb3a1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -46,7 +46,7 @@ public class DefaultStoreFlusher extends StoreFlusher {
   @Override
   public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
       MonitoredTask status, ThroughputController throughputController) throws IOException {
-    ArrayList<Path> result = new ArrayList<Path>();
+    ArrayList<Path> result = new ArrayList<>();
     int cellsCount = snapshot.getCellsCount();
     if (cellsCount == 0) return result; // don't flush if there are no entries
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
index bb57869..6138f5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
@@ -55,7 +55,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{
     }
     // start selection
     Collection<Store> stores = region.stores.values();
-    Set<Store> specificStoresToFlush = new HashSet<Store>();
+    Set<Store> specificStoresToFlush = new HashSet<>();
     for (Store store : stores) {
       if (shouldFlush(store)) {
         specificStoresToFlush.add(store);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
index 61f5882..4318dce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
@@ -39,7 +39,7 @@ public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy {
    * @return the stores need to be flushed.
    */
   @Override public Collection<Store> selectStoresToFlush() {
-    Collection<Store> specificStoresToFlush = new HashSet<Store>();
+    Collection<Store> specificStoresToFlush = new HashSet<>();
     for(Store store : regularStores) {
       if(shouldFlush(store) || region.shouldFlushStore(store)) {
         specificStoresToFlush.add(store);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index a990ceb..b021430 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -91,7 +91,7 @@ public class HMobStore extends HStore {
   private volatile long mobScanCellsCount = 0;
   private volatile long mobScanCellsSize = 0;
   private HColumnDescriptor family;
-  private Map<String, List<Path>> map = new ConcurrentHashMap<String, List<Path>>();
+  private Map<String, List<Path>> map = new ConcurrentHashMap<>();
   private final IdLock keyLock = new IdLock();
   // When we add a MOB reference cell to the HFile, we will add 2 tags along with it
   // 1. A ref tag with type TagType.MOB_REFERENCE_TAG_TYPE. This just denote this this cell is not
@@ -109,7 +109,7 @@ public class HMobStore extends HStore {
     this.homePath = MobUtils.getMobHome(conf);
     this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(),
         family.getNameAsString());
-    List<Path> locations = new ArrayList<Path>(2);
+    List<Path> locations = new ArrayList<>(2);
     locations.add(mobFamilyPath);
     TableName tn = region.getTableDesc().getTableName();
     locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn)
@@ -341,7 +341,7 @@ public class HMobStore extends HStore {
           try {
             locations = map.get(tableNameString);
             if (locations == null) {
-              locations = new ArrayList<Path>(2);
+              locations = new ArrayList<>(2);
               TableName tn = TableName.valueOf(tableNameString);
               locations.add(MobUtils.getMobFamilyPath(conf, tn, family.getNameAsString()));
               locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f2bc068..be01220 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -250,11 +250,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   // - the thread that owns the lock (allow reentrancy)
   // - reference count of (reentrant) locks held by the thread
   // - the row itself
-  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-      new ConcurrentHashMap<HashedBytes, RowLockContext>();
+  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows = new ConcurrentHashMap<>();
 
-  protected final Map<byte[], Store> stores = new ConcurrentSkipListMap<byte[], Store>(
-      Bytes.BYTES_RAWCOMPARATOR);
+  protected final Map<byte[], Store> stores = new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
 
   // TODO: account for each registered handler in HeapSize computation
   private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
@@ -336,7 +334,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   // the maxSeqId up to which the store was flushed. And, skip the edits which
   // are equal to or lower than maxSeqId for each store.
   // The following map is populated when opening the region
-  Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+  Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   /** Saved state from replaying prepare flush cache */
   private PrepareFlushResult prepareFlushResult = null;
@@ -609,8 +607,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   final long rowProcessorTimeout;
 
   // Last flush time for each Store. Useful when we are flushing for each column
-  private final ConcurrentMap<Store, Long> lastStoreFlushTimeMap =
-      new ConcurrentHashMap<Store, Long>();
+  private final ConcurrentMap<Store, Long> lastStoreFlushTimeMap = new ConcurrentHashMap<>();
 
   final RegionServerServices rsServices;
   private RegionServerAccounting rsAccounting;
@@ -642,7 +639,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private final boolean regionStatsEnabled;
   // Stores the replication scope of the various column families of the table
   // that has non-default scope
-  private final NavigableMap<byte[], Integer> replicationScope = new TreeMap<byte[], Integer>(
+  private final NavigableMap<byte[], Integer> replicationScope = new TreeMap<>(
       Bytes.BYTES_COMPARATOR);
 
   /**
@@ -736,7 +733,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     this.rsServices = rsServices;
     this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
     setHTableSpecificConf();
-    this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
+    this.scannerReadPoints = new ConcurrentHashMap<>();
 
     this.busyWaitDuration = conf.getLong(
       "hbase.busy.wait.duration", DEFAULT_BUSY_WAIT_DURATION);
@@ -976,8 +973,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // initialize the thread pool for opening stores in parallel.
       ThreadPoolExecutor storeOpenerThreadPool =
         getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog());
-      CompletionService<HStore> completionService =
-        new ExecutorCompletionService<HStore>(storeOpenerThreadPool);
+      CompletionService<HStore> completionService = new ExecutorCompletionService<>(storeOpenerThreadPool);
 
       // initialize each store in parallel
       for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
@@ -1054,12 +1050,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @return Map of StoreFiles by column family
    */
   private NavigableMap<byte[], List<Path>> getStoreFiles() {
-    NavigableMap<byte[], List<Path>> allStoreFiles =
-      new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
+    NavigableMap<byte[], List<Path>> allStoreFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (Store store: getStores()) {
       Collection<StoreFile> storeFiles = store.getStorefiles();
       if (storeFiles == null) continue;
-      List<Path> storeFileNames = new ArrayList<Path>();
+      List<Path> storeFileNames = new ArrayList<>();
       for (StoreFile storeFile: storeFiles) {
         storeFileNames.add(storeFile.getPath());
       }
@@ -1626,15 +1621,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         }
       }
 
-      Map<byte[], List<StoreFile>> result =
-        new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
+      Map<byte[], List<StoreFile>> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       if (!stores.isEmpty()) {
         // initialize the thread pool for closing stores in parallel.
         ThreadPoolExecutor storeCloserThreadPool =
           getStoreOpenAndCloseThreadPool("StoreCloserThread-" +
             getRegionInfo().getRegionNameAsString());
         CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
-          new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);
+          new ExecutorCompletionService<>(storeCloserThreadPool);
 
         // close each store in parallel
         for (final Store store : stores.values()) {
@@ -1652,8 +1646,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               .submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
                 @Override
                 public Pair<byte[], Collection<StoreFile>> call() throws IOException {
-                  return new Pair<byte[], Collection<StoreFile>>(
-                    store.getFamily().getName(), store.close());
+                  return new Pair<>(store.getFamily().getName(), store.close());
                 }
               });
         }
@@ -1663,7 +1656,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
             List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
             if (familyFiles == null) {
-              familyFiles = new ArrayList<StoreFile>();
+              familyFiles = new ArrayList<>();
               result.put(storeFiles.getFirst(), familyFiles);
             }
             familyFiles.addAll(storeFiles.getSecond());
@@ -2418,12 +2411,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           ((HStore) store).preFlushSeqIDEstimation());
     }
 
-    TreeMap<byte[], StoreFlushContext> storeFlushCtxs
-      = new TreeMap<byte[], StoreFlushContext>(Bytes.BYTES_COMPARATOR);
-    TreeMap<byte[], List<Path>> committedFiles = new TreeMap<byte[], List<Path>>(
-        Bytes.BYTES_COMPARATOR);
-    TreeMap<byte[], MemstoreSize> storeFlushableSize
-        = new TreeMap<byte[], MemstoreSize>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte[], StoreFlushContext> storeFlushCtxs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte[], MemstoreSize> storeFlushableSize = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     // The sequence id of this flush operation which is used to log FlushMarker and pass to
     // createFlushContext to use as the store file's sequence id. It can be in advance of edits
     // still in the memstore, edits that are in other column families yet to be flushed.
@@ -2561,7 +2551,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) {
     if (writeFlushWalMarker && wal != null && !writestate.readOnly) {
       FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH,
-        getRegionInfo(), -1, new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR));
+        getRegionInfo(), -1, new TreeMap<>(Bytes.BYTES_COMPARATOR));
       try {
         WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true,
             mvcc);
@@ -2842,7 +2832,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       List<Cell> cells = e.getValue();
       assert cells instanceof RandomAccess;
 
-      Map<byte[], Integer> kvCount = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+      Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       int listSize = cells.size();
       for (int i=0; i < listSize; i++) {
         Cell cell = cells.get(i);
@@ -3247,7 +3237,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // calling the pre CP hook for batch mutation
       if (!replay && coprocessorHost != null) {
         MiniBatchOperationInProgress<Mutation> miniBatchOp =
-          new MiniBatchOperationInProgress<Mutation>(batchOp.getMutationsForCoprocs(),
+          new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
           batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
         if (coprocessorHost.preBatchMutate(miniBatchOp)) {
           return;
@@ -3401,7 +3391,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // calling the post CP hook for batch mutation
       if (!replay && coprocessorHost != null) {
         MiniBatchOperationInProgress<Mutation> miniBatchOp =
-          new MiniBatchOperationInProgress<Mutation>(batchOp.getMutationsForCoprocs(),
+          new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
           batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
         coprocessorHost.postBatchMutate(miniBatchOp);
       }
@@ -3485,7 +3475,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // call the coprocessor hook to do any finalization steps
         // after the put is done
         MiniBatchOperationInProgress<Mutation> miniBatchOp =
-          new MiniBatchOperationInProgress<Mutation>(batchOp.getMutationsForCoprocs(),
+          new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
           batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
         coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success);
       }
@@ -3599,7 +3589,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     for (byte[] family : familyMap.keySet()) {
       if (!this.htableDescriptor.hasFamily(family)) {
         if (nonExistentList == null) {
-          nonExistentList = new ArrayList<byte[]>();
+          nonExistentList = new ArrayList<>();
         }
         nonExistentList.add(family);
       }
@@ -3915,7 +3905,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private void put(final byte [] row, byte [] family, List<Cell> edits)
   throws IOException {
     NavigableMap<byte[], List<Cell>> familyMap;
-    familyMap = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);
+    familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
     familyMap.put(family, edits);
     Put p = new Put(row);
@@ -4164,7 +4154,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // If this flag is set, make use of the hfile archiving by making recovered.edits a fake
       // column family. Have to fake out file type too by casting our recovered.edits as storefiles
       String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regiondir).getName();
-      Set<StoreFile> fakeStoreFiles = new HashSet<StoreFile>(files.size());
+      Set<StoreFile> fakeStoreFiles = new HashSet<>(files.size());
       for (Path file: files) {
         fakeStoreFiles.add(new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf,
           null, null));
@@ -4506,7 +4496,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException {
     long flushSeqId = flush.getFlushSequenceNumber();
 
-    HashSet<Store> storesToFlush = new HashSet<Store>();
+    HashSet<Store> storesToFlush = new HashSet<>();
     for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
       byte[] family = storeFlush.getFamilyName().toByteArray();
       Store store = getStore(family);
@@ -5103,7 +5093,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     startRegionOperation(); // obtain region close lock
     try {
-      Map<Store, Long> map = new HashMap<Store, Long>();
+      Map<Store, Long> map = new HashMap<>();
       synchronized (writestate) {
         for (Store store : getStores()) {
           // TODO: some stores might see new data from flush, while others do not which
@@ -5280,7 +5270,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   @Override
   public List<Store> getStores() {
-    List<Store> list = new ArrayList<Store>(stores.size());
+    List<Store> list = new ArrayList<>(stores.size());
     list.addAll(stores.values());
     return list;
   }
@@ -5288,7 +5278,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   @Override
   public List<String> getStoreFileList(final byte [][] columns)
     throws IllegalArgumentException {
-    List<String> storeFileNames = new ArrayList<String>();
+    List<String> storeFileNames = new ArrayList<>();
     synchronized(closeLock) {
       for(byte[] column : columns) {
         Store store = this.stores.get(column);
@@ -5560,8 +5550,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths,
       boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile) throws IOException {
     long seqId = -1;
-    Map<byte[], List<Path>> storeFiles = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
-    Map<String, Long> storeFilesSizes = new HashMap<String, Long>();
+    Map<byte[], List<Path>> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    Map<String, Long> storeFilesSizes = new HashMap<>();
     Preconditions.checkNotNull(familyPaths);
     // we need writeLock for multi-family bulk load
     startBulkRegionOperation(hasMultipleColumnFamilies(familyPaths));
@@ -5572,8 +5562,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // There possibly was a split that happened between when the split keys
       // were gathered and before the HRegion's write lock was taken.  We need
       // to validate the HFile region before attempting to bulk load all of them
-      List<IOException> ioes = new ArrayList<IOException>();
-      List<Pair<byte[], String>> failures = new ArrayList<Pair<byte[], String>>();
+      List<IOException> ioes = new ArrayList<>();
+      List<Pair<byte[], String>> failures = new ArrayList<>();
       for (Pair<byte[], String> p : familyPaths) {
         byte[] familyName = p.getFirst();
         String path = p.getSecond();
@@ -5694,7 +5684,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             if(storeFiles.containsKey(familyName)) {
               storeFiles.get(familyName).add(commitedStoreFile);
             } else {
-              List<Path> storeFileNames = new ArrayList<Path>();
+              List<Path> storeFileNames = new ArrayList<>();
               storeFileNames.add(commitedStoreFile);
               storeFiles.put(familyName, storeFileNames);
             }
@@ -5841,11 +5831,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         throws IOException {
       // Here we separate all scanners into two lists - scanner that provide data required
       // by the filter to operate (scanners list) and all others (joinedScanners list).
-      List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(scan.getFamilyMap().size());
-      List<KeyValueScanner> joinedScanners =
-          new ArrayList<KeyValueScanner>(scan.getFamilyMap().size());
+      List<KeyValueScanner> scanners = new ArrayList<>(scan.getFamilyMap().size());
+      List<KeyValueScanner> joinedScanners = new ArrayList<>(scan.getFamilyMap().size());
       // Store all already instantiated scanners for exception handling
-      List<KeyValueScanner> instantiatedScanners = new ArrayList<KeyValueScanner>();
+      List<KeyValueScanner> instantiatedScanners = new ArrayList<>();
       // handle additionalScanners
       if (additionalScanners != null && !additionalScanners.isEmpty()) {
         scanners.addAll(additionalScanners);
@@ -5973,7 +5962,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // to handle scan or get operation.
         moreValues = nextInternal(outResults, scannerContext);
       } else {
-        List<Cell> tmpList = new ArrayList<Cell>();
+        List<Cell> tmpList = new ArrayList<>();
         moreValues = nextInternal(tmpList, scannerContext);
         outResults.addAll(tmpList);
       }
@@ -6837,7 +6826,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // The row key is the region name
     byte[] row = r.getRegionInfo().getRegionName();
     final long now = EnvironmentEdgeManager.currentTime();
-    final List<Cell> cells = new ArrayList<Cell>(2);
+    final List<Cell> cells = new ArrayList<>(2);
     cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
       HConstants.REGIONINFO_QUALIFIER, now,
       r.getRegionInfo().toByteArray()));
@@ -6930,7 +6919,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   @Override
   public List<Cell> get(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
       throws IOException {
-    List<Cell> results = new ArrayList<Cell>();
+    List<Cell> results = new ArrayList<>();
 
     // pre-get CP hook
     if (withCoprocessor && (coprocessorHost != null)) {
@@ -7068,7 +7057,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     boolean locked = false;
     List<RowLock> acquiredRowLocks = null;
-    List<Mutation> mutations = new ArrayList<Mutation>();
+    List<Mutation> mutations = new ArrayList<>();
     Collection<byte[]> rowsToLock = processor.getRowsToLock();
     // This is assigned by mvcc either explicity in the below or in the guts of the WAL append
     // when it assigns the edit a sequencedid (A.K.A the mvcc write number).
@@ -7190,8 +7179,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     // Case with time bound
-    FutureTask<Void> task =
-      new FutureTask<Void>(new Callable<Void>() {
+    FutureTask<Void> task = new FutureTask<>(new Callable<Void>() {
         @Override
         public Void call() throws IOException {
           try {
@@ -7280,7 +7268,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     this.writeRequestsCount.increment();
     WriteEntry writeEntry = null;
     startRegionOperation(op);
-    List<Cell> results = returnResults? new ArrayList<Cell>(mutation.size()): null;
+    List<Cell> results = returnResults? new ArrayList<>(mutation.size()): null;
     RowLock rowLock = null;
     MemstoreSize memstoreSize = new MemstoreSize();
     try {
@@ -7292,8 +7280,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           return returnResults? cpResult: null;
         }
         Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
-        Map<Store, List<Cell>> forMemStore =
-            new HashMap<Store, List<Cell>>(mutation.getFamilyCellMap().size());
+        Map<Store, List<Cell>> forMemStore = new HashMap<>(mutation.getFamilyCellMap().size());
         // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
         // what to return back to the client (in 'forMemStore' and 'results' respectively).
         WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
@@ -7468,7 +7455,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       final List<Cell> deltas, final List<Cell> results)
   throws IOException {
     byte [] columnFamily = store.getFamily().getName();
-    List<Cell> toApply = new ArrayList<Cell>(deltas.size());
+    List<Cell> toApply = new ArrayList<>(deltas.size());
     // Get previous values for all columns in this family.
     List<Cell> currentValues = get(mutation, store, deltas,
         null/*Default IsolationLevel*/,
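
Note on the FutureTask hunk above: the outer "new FutureTask<>(...)" takes the diamond, but the anonymous "new Callable<Void>() {...}" keeps its explicit type argument. Java 7 and 8 reject the diamond on anonymous inner classes; JEP 213 relaxed that restriction only in Java 9. The same constraint explains the explicit <Long> on the anonymous Comparator in the HRegionServer hunk further down. A minimal sketch, assuming a Java 8 target:

    import java.util.concurrent.Callable;
    import java.util.concurrent.FutureTask;

    public class AnonymousDiamond {
      // Compiles on Java 8: diamond on the enclosing FutureTask, explicit
      // argument on the anonymous Callable.
      FutureTask<Void> ok = new FutureTask<>(new Callable<Void>() {
        @Override
        public Void call() {
          return null;
        }
      });

      // Rejected by javac before Java 9:
      // FutureTask<Void> bad = new FutureTask<>(new Callable<>() { ... });
    }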

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 97cc126..144f43b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -241,7 +241,7 @@ public class HRegionFileSystem {
       return null;
     }
 
-    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
+    ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(files.length);
     for (FileStatus status: files) {
       if (validate && !StoreFileInfo.isValid(status)) {
         LOG.warn("Invalid StoreFile: " + status.getPath());
@@ -355,7 +355,7 @@ public class HRegionFileSystem {
     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
     if (fds == null) return null;
 
-    ArrayList<String> families = new ArrayList<String>(fds.length);
+    ArrayList<String> families = new ArrayList<>(fds.length);
     for (FileStatus status: fds) {
       families.add(status.getPath().getName());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 66d2d4d..cbf6561 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -237,7 +237,7 @@ public class HRegionServer extends HasThread implements
   //true - if open region action in progress
   //false - if close region action in progress
   protected final ConcurrentMap<byte[], Boolean> regionsInTransitionInRS =
-    new ConcurrentSkipListMap<byte[], Boolean>(Bytes.BYTES_COMPARATOR);
+    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
   // Cache flushing
   protected MemStoreFlusher cacheFlusher;
@@ -280,7 +280,7 @@ public class HRegionServer extends HasThread implements
    * Map of regions currently being served by this region server. Key is the
    * encoded region name.  All access should be synchronized.
    */
-  protected final Map<String, Region> onlineRegions = new ConcurrentHashMap<String, Region>();
+  protected final Map<String, Region> onlineRegions = new ConcurrentHashMap<>();
 
   /**
    * Map of encoded region names to the DataNode locations they should be hosted on
@@ -292,7 +292,7 @@ public class HRegionServer extends HasThread implements
    * and here we really mean DataNode locations.
    */
   protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
-      new ConcurrentHashMap<String, InetSocketAddress[]>();
+      new ConcurrentHashMap<>();
 
   /**
    * Set of regions currently being in recovering state which means it can accept writes(edits from
@@ -321,7 +321,7 @@ public class HRegionServer extends HasThread implements
   // debugging and unit tests.
   private volatile boolean abortRequested;
 
-  ConcurrentMap<String, Integer> rowlocks = new ConcurrentHashMap<String, Integer>();
+  ConcurrentMap<String, Integer> rowlocks = new ConcurrentHashMap<>();
 
   // A state before we go into stopped state.  At this stage we're closing user
   // space regions.
@@ -1323,7 +1323,7 @@ public class HRegionServer extends HasThread implements
     // Wait till all regions are closed before going out.
     int lastCount = -1;
     long previousLogTime = 0;
-    Set<String> closedRegions = new HashSet<String>();
+    Set<String> closedRegions = new HashSet<>();
     boolean interrupted = false;
     try {
       while (!isOnlineRegionsEmpty()) {
@@ -1746,7 +1746,7 @@ public class HRegionServer extends HasThread implements
     createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir);
 
     // listeners the wal factory will add to wals it creates.
-    final List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
+    final List<WALActionsListener> listeners = new ArrayList<>();
     listeners.add(new MetricsWAL());
     if (this.replicationSourceHandler != null &&
         this.replicationSourceHandler.getWALActionsListener() != null) {
@@ -2657,7 +2657,7 @@ public class HRegionServer extends HasThread implements
    */
   SortedMap<Long, Region> getCopyOfOnlineRegionsSortedBySize() {
     // we'll sort the regions in reverse
-    SortedMap<Long, Region> sortedRegions = new TreeMap<Long, Region>(
+    SortedMap<Long, Region> sortedRegions = new TreeMap<>(
         new Comparator<Long>() {
           @Override
           public int compare(Long a, Long b) {
@@ -2691,7 +2691,7 @@ public class HRegionServer extends HasThread implements
    * the first N regions being served regardless of load.)
    */
   protected HRegionInfo[] getMostLoadedRegions() {
-    ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+    ArrayList<HRegionInfo> regions = new ArrayList<>();
     for (Region r : onlineRegions.values()) {
       if (!r.isAvailable()) {
         continue;
@@ -2903,7 +2903,7 @@ public class HRegionServer extends HasThread implements
    */
   @Override
   public List<Region> getOnlineRegions(TableName tableName) {
-     List<Region> tableRegions = new ArrayList<Region>();
+     List<Region> tableRegions = new ArrayList<>();
      synchronized (this.onlineRegions) {
        for (Region region: this.onlineRegions.values()) {
          HRegionInfo regionInfo = region.getRegionInfo();
@@ -2917,7 +2917,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public List<Region> getOnlineRegions() {
-    List<Region> allRegions = new ArrayList<Region>();
+    List<Region> allRegions = new ArrayList<>();
     synchronized (this.onlineRegions) {
       // Return a clone copy of the onlineRegions
       allRegions.addAll(onlineRegions.values());
@@ -2931,7 +2931,7 @@ public class HRegionServer extends HasThread implements
    */
   @Override
   public Set<TableName> getOnlineTables() {
-    Set<TableName> tables = new HashSet<TableName>();
+    Set<TableName> tables = new HashSet<>();
     synchronized (this.onlineRegions) {
       for (Region region: this.onlineRegions.values()) {
         tables.add(region.getTableDesc().getTableName());
@@ -2942,7 +2942,7 @@ public class HRegionServer extends HasThread implements
 
   // used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070).
   public String[] getRegionServerCoprocessors() {
-    TreeSet<String> coprocessors = new TreeSet<String>();
+    TreeSet<String> coprocessors = new TreeSet<>();
     try {
       coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors());
     } catch (IOException exception) {
@@ -3306,7 +3306,7 @@ public class HRegionServer extends HasThread implements
   // This map will contains all the regions that we closed for a move.
   //  We add the time it was moved as we don't want to keep too old information
   protected Map<String, MovedRegionInfo> movedRegions =
-      new ConcurrentHashMap<String, MovedRegionInfo>(3000);
+      new ConcurrentHashMap<>(3000);
 
   // We need a timeout. If not there is a risk of giving a wrong information: this would double
   //  the number of network calls instead of reducing them.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b74e635..8a66c3a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -509,14 +509,13 @@ public class HStore implements Store {
 
   private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
     if (files == null || files.isEmpty()) {
-      return new ArrayList<StoreFile>();
+      return new ArrayList<>();
     }
     // initialize the thread pool for opening store files in parallel..
     ThreadPoolExecutor storeFileOpenerThreadPool =
       this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" +
           this.getColumnFamilyName());
-    CompletionService<StoreFile> completionService =
-      new ExecutorCompletionService<StoreFile>(storeFileOpenerThreadPool);
+    CompletionService<StoreFile> completionService = new ExecutorCompletionService<>(storeFileOpenerThreadPool);
 
     int totalValidStoreFile = 0;
     for (final StoreFileInfo storeFileInfo: files) {
@@ -531,7 +530,7 @@ public class HStore implements Store {
       totalValidStoreFile++;
     }
 
-    ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
+    ArrayList<StoreFile> results = new ArrayList<>(files.size());
     IOException ioe = null;
     try {
       for (int i = 0; i < totalValidStoreFile; i++) {
@@ -588,7 +587,7 @@ public class HStore implements Store {
 
   @Override
   public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
-    List<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(newFiles.size());
+    List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size());
     for (String file : newFiles) {
       storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file));
     }
@@ -605,16 +604,15 @@ public class HStore implements Store {
   private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException {
     StoreFileManager sfm = storeEngine.getStoreFileManager();
     Collection<StoreFile> currentFiles = sfm.getStorefiles();
-    if (currentFiles == null) currentFiles = new ArrayList<StoreFile>(0);
+    if (currentFiles == null) currentFiles = new ArrayList<>(0);
 
-    if (newFiles == null) newFiles = new ArrayList<StoreFileInfo>(0);
+    if (newFiles == null) newFiles = new ArrayList<>(0);
 
-    HashMap<StoreFileInfo, StoreFile> currentFilesSet =
-        new HashMap<StoreFileInfo, StoreFile>(currentFiles.size());
+    HashMap<StoreFileInfo, StoreFile> currentFilesSet = new HashMap<>(currentFiles.size());
     for (StoreFile sf : currentFiles) {
       currentFilesSet.put(sf.getFileInfo(), sf);
     }
-    HashSet<StoreFileInfo> newFilesSet = new HashSet<StoreFileInfo>(newFiles);
+    HashSet<StoreFileInfo> newFilesSet = new HashSet<>(newFiles);
 
     Set<StoreFileInfo> toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet());
     Set<StoreFileInfo> toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet);
@@ -626,7 +624,7 @@ public class HStore implements Store {
     LOG.info("Refreshing store files for region " + this.getRegionInfo().getRegionNameAsString()
       + " files to add: " + toBeAddedFiles + " files to remove: " + toBeRemovedFiles);
 
-    Set<StoreFile> toBeRemovedStoreFiles = new HashSet<StoreFile>(toBeRemovedFiles.size());
+    Set<StoreFile> toBeRemovedStoreFiles = new HashSet<>(toBeRemovedFiles.size());
     for (StoreFileInfo sfi : toBeRemovedFiles) {
       toBeRemovedStoreFiles.add(currentFilesSet.get(sfi));
     }
@@ -879,7 +877,7 @@ public class HStore implements Store {
 
         // close each store file in parallel
         CompletionService<Void> completionService =
-          new ExecutorCompletionService<Void>(storeFileCloserThreadPool);
+          new ExecutorCompletionService<>(storeFileCloserThreadPool);
         for (final StoreFile f : result) {
           completionService.submit(new Callable<Void>() {
             @Override
@@ -1183,7 +1181,7 @@ public class HStore implements Store {
     // actually more correct, since memstore get put at the end.
     List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(storeFilesToScan,
       cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore());
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(sfScanners.size() + 1);
+    List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
     scanners.addAll(sfScanners);
     // Then the memstore scanners
     scanners.addAll(memStoreScanners);
@@ -1206,7 +1204,7 @@ public class HStore implements Store {
     }
     List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(files,
       cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore());
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(sfScanners.size() + 1);
+    List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
     scanners.addAll(sfScanners);
     // Then the memstore scanners
     if (memStoreScanners != null) {
@@ -1312,7 +1310,7 @@ public class HStore implements Store {
       // TODO: get rid of this!
       if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
         LOG.warn("hbase.hstore.compaction.complete is set to false");
-        sfs = new ArrayList<StoreFile>(newFiles.size());
+        sfs = new ArrayList<>(newFiles.size());
         final boolean evictOnClose =
             cacheConf != null? cacheConf.shouldEvictOnClose(): true;
         for (Path newFile : newFiles) {
@@ -1359,7 +1357,7 @@ public class HStore implements Store {
 
   private List<StoreFile> moveCompatedFilesIntoPlace(
       final CompactionRequest cr, List<Path> newFiles, User user) throws IOException {
-    List<StoreFile> sfs = new ArrayList<StoreFile>(newFiles.size());
+    List<StoreFile> sfs = new ArrayList<>(newFiles.size());
     for (Path newFile : newFiles) {
       assert newFile != null;
       final StoreFile sf = moveFileIntoPlace(newFile);
@@ -1389,11 +1387,11 @@ public class HStore implements Store {
   private void writeCompactionWalRecord(Collection<StoreFile> filesCompacted,
       Collection<StoreFile> newFiles) throws IOException {
     if (region.getWAL() == null) return;
-    List<Path> inputPaths = new ArrayList<Path>(filesCompacted.size());
+    List<Path> inputPaths = new ArrayList<>(filesCompacted.size());
     for (StoreFile f : filesCompacted) {
       inputPaths.add(f.getPath());
     }
-    List<Path> outputPaths = new ArrayList<Path>(newFiles.size());
+    List<Path> outputPaths = new ArrayList<>(newFiles.size());
     for (StoreFile f : newFiles) {
       outputPaths.add(f.getPath());
     }
@@ -1489,14 +1487,14 @@ public class HStore implements Store {
     // being in the store's folder) or they may be missing due to a compaction.
 
     String familyName = this.getColumnFamilyName();
-    List<String> inputFiles = new ArrayList<String>(compactionInputs.size());
+    List<String> inputFiles = new ArrayList<>(compactionInputs.size());
     for (String compactionInput : compactionInputs) {
       Path inputPath = fs.getStoreFilePath(familyName, compactionInput);
       inputFiles.add(inputPath.getName());
     }
 
     //some of the input files might already be deleted
-    List<StoreFile> inputStoreFiles = new ArrayList<StoreFile>(compactionInputs.size());
+    List<StoreFile> inputStoreFiles = new ArrayList<>(compactionInputs.size());
     for (StoreFile sf : this.getStorefiles()) {
       if (inputFiles.contains(sf.getPath().getName())) {
         inputStoreFiles.add(sf);
@@ -1504,7 +1502,7 @@ public class HStore implements Store {
     }
 
     // check whether we need to pick up the new files
-    List<StoreFile> outputStoreFiles = new ArrayList<StoreFile>(compactionOutputs.size());
+    List<StoreFile> outputStoreFiles = new ArrayList<>(compactionOutputs.size());
 
     if (pickCompactionFiles) {
       for (StoreFile sf : this.getStorefiles()) {
@@ -1738,7 +1736,7 @@ public class HStore implements Store {
     }
     if (delSfs == null || delSfs.isEmpty()) return;
 
-    Collection<StoreFile> newFiles = new ArrayList<StoreFile>(); // No new files.
+    Collection<StoreFile> newFiles = new ArrayList<>(); // No new files.
     writeCompactionWalRecord(delSfs, newFiles);
     replaceStoreFiles(delSfs, newFiles);
     completeCompaction(delSfs);
@@ -2167,7 +2165,7 @@ public class HStore implements Store {
       this.snapshot = memstore.snapshot();
       this.cacheFlushCount = snapshot.getCellsCount();
       this.cacheFlushSize = snapshot.getDataSize();
-      committedFiles = new ArrayList<Path>(1);
+      committedFiles = new ArrayList<>(1);
     }
 
     @Override
@@ -2183,7 +2181,7 @@ public class HStore implements Store {
       if (this.tempFiles == null || this.tempFiles.isEmpty()) {
         return false;
       }
-      List<StoreFile> storeFiles = new ArrayList<StoreFile>(this.tempFiles.size());
+      List<StoreFile> storeFiles = new ArrayList<>(this.tempFiles.size());
       for (Path storeFilePath : tempFiles) {
         try {
           StoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status);
@@ -2241,7 +2239,7 @@ public class HStore implements Store {
     @Override
     public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
         throws IOException {
-      List<StoreFile> storeFiles = new ArrayList<StoreFile>(fileNames.size());
+      List<StoreFile> storeFiles = new ArrayList<>(fileNames.size());
       for (String file : fileNames) {
         // open the file as a store file (hfile link, etc)
         StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
@@ -2273,7 +2271,7 @@ public class HStore implements Store {
       if (snapshot == null) {
         return;
       }
-      HStore.this.updateStorefiles(new ArrayList<StoreFile>(0), snapshot.getId());
+      HStore.this.updateStorefiles(new ArrayList<>(0), snapshot.getId());
     }
   }
 
@@ -2424,7 +2422,7 @@ public class HStore implements Store {
             this.getStoreEngine().getStoreFileManager().getCompactedfiles();
         if (compactedfiles != null && compactedfiles.size() != 0) {
           // Do a copy under read lock
-          copyCompactedfiles = new ArrayList<StoreFile>(compactedfiles);
+          copyCompactedfiles = new ArrayList<>(compactedfiles);
         } else {
           if (LOG.isTraceEnabled()) {
             LOG.trace("No compacted files to archive");
@@ -2449,7 +2447,7 @@ public class HStore implements Store {
    */
   private void removeCompactedfiles(Collection<StoreFile> compactedfiles)
       throws IOException {
-    final List<StoreFile> filesToRemove = new ArrayList<StoreFile>(compactedfiles.size());
+    final List<StoreFile> filesToRemove = new ArrayList<>(compactedfiles.size());
     for (final StoreFile file : compactedfiles) {
       synchronized (file) {
         try {

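A note on the HStore hunks above: between them they cover every position where
the diamond operator is legal on a Java 8 compiler, which is what this branch
builds with. Assignment targets (the CompletionService lines), sized
constructors (new ArrayList<>(files.size())) and method arguments
(updateStorefiles(new ArrayList<>(0), ...)) all infer cleanly, while anonymous
classes such as the Callable<Void> in the parallel close loop keep their
explicit type argument, because diamond on an anonymous class is only accepted
from Java 9 (JEP 213) onward. A minimal standalone sketch of that split; the
class and variable names are illustrative, not taken from the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DiamondDemo {
  // Argument-position inference: the compiler reads List<String> off the
  // formal parameter, so new ArrayList<>(0) below means ArrayList<String>.
  static void updateFiles(List<String> files) {
    System.out.println("files: " + files);
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    try {
      // Assignment-target inference, as in the openStoreFiles() hunk.
      CompletionService<String> cs = new ExecutorCompletionService<>(pool);

      // The one shape the commit leaves alone: an anonymous class must
      // spell out its type argument on Java 8.
      cs.submit(new Callable<String>() {
        @Override
        public String call() {
          return "done";
        }
      });
      System.out.println(cs.take().get());

      updateFiles(new ArrayList<>(0)); // argument-position inference
    } finally {
      pool.shutdown();
    }
  }
}
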
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index c7099a5..e834306 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -106,7 +106,7 @@ public class HeapMemoryManager {
 
   private MetricsHeapMemoryManager metricsHeapMemoryManager;
 
-  private List<HeapMemoryTuneObserver> tuneObservers = new ArrayList<HeapMemoryTuneObserver>();
+  private List<HeapMemoryTuneObserver> tuneObservers = new ArrayList<>();
 
   public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
       Server server, RegionServerAccounting regionServerAccounting) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index faa9b67..501c1e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -157,7 +157,7 @@ public class ImmutableSegment extends Segment {
   }
 
   public List<Segment> getAllSegments() {
-    List<Segment> res = new ArrayList<Segment>(Arrays.asList(this));
+    List<Segment> res = new ArrayList<>(Arrays.asList(this));
     return res;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
index ff76d20..195e8f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
@@ -88,11 +88,9 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
   KeyValueHeap(List<? extends KeyValueScanner> scanners,
       KVScannerComparator comparator) throws IOException {
     this.comparator = comparator;
-    this.scannersForDelayedClose = new ArrayList<KeyValueScanner>(
-        scanners.size());
+    this.scannersForDelayedClose = new ArrayList<>(scanners.size());
     if (!scanners.isEmpty()) {
-      this.heap = new PriorityQueue<KeyValueScanner>(scanners.size(),
-          this.comparator);
+      this.heap = new PriorityQueue<>(scanners.size(), this.comparator);
       for (KeyValueScanner scanner : scanners) {
         if (scanner.peek() != null) {
           this.heap.add(scanner);

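The KeyValueHeap hunk is the constructor-argument case: passing the initial
capacity and the comparator does not disturb inference, since the element type
still comes from the declared target. A tiny self-contained example with
stand-in types (nothing here is from KeyValueHeap itself):

import java.util.Comparator;
import java.util.PriorityQueue;

public class HeapDemo {
  public static void main(String[] args) {
    Comparator<String> byLength = Comparator.comparingInt(String::length);
    // Before: new PriorityQueue<String>(16, byLength). The comparator
    // parameter is Comparator<? super E>, and E is still read from the
    // PriorityQueue<String> target on the left.
    PriorityQueue<String> heap = new PriorityQueue<>(16, byLength);
    heap.add("scanner-a");
    heap.add("b");
    System.out.println(heap.peek()); // "b", the shortest element first
  }
}
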
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
index 4af703c..b12b7b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
@@ -56,7 +56,7 @@ import java.io.IOException;
 public class Leases extends HasThread {
   private static final Log LOG = LogFactory.getLog(Leases.class.getName());
   public static final int MIN_WAIT_TIME = 100;
-  private final Map<String, Lease> leases = new ConcurrentHashMap<String, Lease>();
+  private final Map<String, Lease> leases = new ConcurrentHashMap<>();
 
   protected final int leaseCheckFrequency;
   protected volatile boolean stopRequested = false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 24f0d1a..9d1bc4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -54,8 +54,7 @@ public class LogRoller extends HasThread implements Closeable {
   private static final Log LOG = LogFactory.getLog(LogRoller.class);
   private final ReentrantLock rollLock = new ReentrantLock();
   private final AtomicBoolean rollLog = new AtomicBoolean(false);
-  private final ConcurrentHashMap<WAL, Boolean> walNeedsRoll =
-      new ConcurrentHashMap<WAL, Boolean>();
+  private final ConcurrentHashMap<WAL, Boolean> walNeedsRoll = new ConcurrentHashMap<>();
   private final Server server;
   protected final RegionServerServices services;
   private volatile long lastrolltime = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
index 8975ac7..a339abf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
@@ -627,7 +627,7 @@ implements HeapSize, Map<K,V> {
   */
   private long addEntry(int hash, K key, V value, int bucketIndex) {
     Entry<K,V> e = entries[bucketIndex];
-    Entry<K,V> newE = new Entry<K,V>(hash, key, value, e, tailPtr);
+    Entry<K,V> newE = new Entry<>(hash, key, value, e, tailPtr);
     entries[bucketIndex] = newE;
     // add as most recently used in lru
     if (size == 0) {
@@ -810,7 +810,7 @@ implements HeapSize, Map<K,V> {
    * @return Sorted list of entries
    */
   public List<Entry<K,V>> entryLruList() {
-    List<Entry<K,V>> entryList = new ArrayList<Entry<K,V>>();
+    List<Entry<K,V>> entryList = new ArrayList<>();
     Entry<K,V> entry = headPtr;
     while(entry != null) {
       entryList.add(entry);
@@ -827,7 +827,7 @@ implements HeapSize, Map<K,V> {
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
       justification="Unused debugging function that reads only")
   public Set<Entry<K,V>> entryTableSet() {
-    Set<Entry<K,V>> entrySet = new HashSet<Entry<K,V>>();
+    Set<Entry<K,V>> entrySet = new HashSet<>();
     Entry [] table = entries; // FindBugs IS2_INCONSISTENT_SYNC
     for(int i=0;i<table.length;i++) {
       for(Entry e = table[i]; e != null; e = e.next) {

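LruHashMap is the generic-class case: inside a class parameterized by K and V,
new Entry<>(hash, key, value, e, tailPtr) infers the enclosing type variables
themselves. A hypothetical sketch of the same pattern, reduced to a minimal
linked node type (Chain and Node are made-up names, not HBase classes):

import java.util.ArrayList;
import java.util.List;

public class Chain<K, V> {
  private static final class Node<K, V> {
    final K key;
    final V value;
    final Node<K, V> next;

    Node(K key, V value, Node<K, V> next) {
      this.key = key;
      this.value = value;
      this.next = next;
    }
  }

  private Node<K, V> head;

  void prepend(K key, V value) {
    // Before: new Node<K, V>(key, value, head). Diamond infers the
    // enclosing class's K and V from the target field's type.
    head = new Node<>(key, value, head);
  }

  List<K> keys() {
    List<K> out = new ArrayList<>();
    for (Node<K, V> n = head; n != null; n = n.next) {
      out.add(n.key);
    }
    return out;
  }

  public static void main(String[] args) {
    Chain<String, Integer> c = new Chain<>();
    c.prepend("a", 1);
    c.prepend("b", 2);
    System.out.println(c.keys()); // prints [b, a]
  }
}
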
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
index f31c973..6a30eac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
@@ -38,7 +38,7 @@ import java.util.List;
 @InterfaceAudience.Private
 public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator {
 
-  private List<Cell> kvs = new ArrayList<Cell>();
+  private List<Cell> kvs = new ArrayList<>();
   private boolean hasMore;
   private Iterator<Cell> kvsIterator;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index fd77cf9..174d3ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -77,10 +77,8 @@ class MemStoreFlusher implements FlushRequester {
   private Configuration conf;
   // These two data members go together.  Any entry in the one must have
   // a corresponding entry in the other.
-  private final BlockingQueue<FlushQueueEntry> flushQueue =
-    new DelayQueue<FlushQueueEntry>();
-  private final Map<Region, FlushRegionEntry> regionsInQueue =
-    new HashMap<Region, FlushRegionEntry>();
+  private final BlockingQueue<FlushQueueEntry> flushQueue = new DelayQueue<>();
+  private final Map<Region, FlushRegionEntry> regionsInQueue = new HashMap<>();
   private AtomicBoolean wakeupPending = new AtomicBoolean();
 
   private final long threadWakeFrequency;
@@ -92,7 +90,7 @@ class MemStoreFlusher implements FlushRequester {
   private final LongAdder updatesBlockedMsHighWater = new LongAdder();
 
   private final FlushHandler[] flushHandlers;
-  private List<FlushRequestListener> flushRequestListeners = new ArrayList<FlushRequestListener>(1);
+  private List<FlushRequestListener> flushRequestListeners = new ArrayList<>(1);
 
   /**
    * @param conf
@@ -131,7 +129,7 @@ class MemStoreFlusher implements FlushRequester {
    */
   private boolean flushOneForGlobalPressure() {
     SortedMap<Long, Region> regionsBySize = server.getCopyOfOnlineRegionsSortedBySize();
-    Set<Region> excludedRegions = new HashSet<Region>();
+    Set<Region> excludedRegions = new HashSet<>();
 
     double secondaryMultiplier
       = ServerRegionReplicaUtil.getRegionReplicaStoreFileRefreshMultiplier(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
index 30e4311..4e87135 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
@@ -65,7 +65,7 @@ public class MemStoreLABImpl implements MemStoreLAB {
 
   static final Log LOG = LogFactory.getLog(MemStoreLABImpl.class);
 
-  private AtomicReference<Chunk> curChunk = new AtomicReference<Chunk>();
+  private AtomicReference<Chunk> curChunk = new AtomicReference<>();
   // A queue of chunks from pool contained by this memstore LAB
   // TODO: in the future, it would be better to have List implementation instead of Queue,
   // as FIFO order is not so important here

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
index e2f4ebb..7728534 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
@@ -49,7 +49,7 @@ public abstract class MemStoreSegmentsIterator implements Iterator<Cell> {
     this.scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
 
     // list of Scanners of segments in the pipeline, when compaction starts
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
+    List<KeyValueScanner> scanners = new ArrayList<>();
 
     // create the list of scanners to traverse over all the data
     // no dirty reads here as these are immutable segments
@@ -61,4 +61,4 @@ public abstract class MemStoreSegmentsIterator implements Iterator<Cell> {
   }
 
   public abstract void close();
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
index 995ea93..a0cd79d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
@@ -131,7 +131,7 @@ MultiRowMutationProcessorResponse> {
     Arrays.fill(opStatus, OperationStatus.NOT_RUN);
     WALEdit[] walEditsFromCP = new WALEdit[mutations.size()];
     if (coprocessorHost != null) {
-      miniBatch = new MiniBatchOperationInProgress<Mutation>(
+      miniBatch = new MiniBatchOperationInProgress<>(
           mutations.toArray(new Mutation[mutations.size()]), opStatus, walEditsFromCP, 0,
           mutations.size());
       coprocessorHost.preBatchMutate(miniBatch);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index ee4fbb9..ffcc834 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -54,7 +54,7 @@ public class MultiVersionConcurrencyControl {
   // reduce the number of allocations on the write path?
   // This could be equal to the number of handlers + a small number.
   // TODO: St.Ack 20150903 Sounds good to me.
-  private final LinkedList<WriteEntry> writeQueue = new LinkedList<WriteEntry>();
+  private final LinkedList<WriteEntry> writeQueue = new LinkedList<>();
 
   public MultiVersionConcurrencyControl() {
     super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index e6e43a4..7312852 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -323,7 +323,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * completion of multiGets.
    */
    static class RegionScannersCloseCallBack implements RpcCallback {
-    private final List<RegionScanner> scanners = new ArrayList<RegionScanner>();
+    private final List<RegionScanner> scanners = new ArrayList<>();
 
     public void addScanner(RegionScanner scanner) {
       this.scanners.add(scanner);
@@ -818,7 +818,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             case DELETE:
               // Collect the individual mutations and apply in a batch
               if (mutations == null) {
-                mutations = new ArrayList<ClientProtos.Action>(actions.getActionCount());
+                mutations = new ArrayList<>(actions.getActionCount());
               }
               mutations.add(action);
               break;
@@ -834,7 +834,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             pbResult = ProtobufUtil.toResultNoData(r);
             //  Hard to guess the size here.  Just make a rough guess.
             if (cellsToReturn == null) {
-              cellsToReturn = new ArrayList<CellScannable>();
+              cellsToReturn = new ArrayList<>();
             }
             cellsToReturn.add(r);
           } else {
@@ -1301,7 +1301,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * @return list of blocking services and their security info classes that this server supports
    */
   protected List<BlockingServiceAndInterface> getServices() {
-    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(2);
+    List<BlockingServiceAndInterface> bssi = new ArrayList<>(2);
     bssi.add(new BlockingServiceAndInterface(
       ClientService.newReflectiveBlockingService(this),
       ClientService.BlockingInterface.class));
@@ -1543,7 +1543,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       checkOpen();
       requestCount.increment();
       Map<String, Region> onlineRegions = regionServer.onlineRegions;
-      List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
+      List<HRegionInfo> list = new ArrayList<>(onlineRegions.size());
       for (Region region: onlineRegions.values()) {
         list.add(region.getRegionInfo());
       }
@@ -1587,7 +1587,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     } else {
       regions = regionServer.getOnlineRegions();
     }
-    List<RegionLoad> rLoads = new ArrayList<RegionLoad>(regions.size());
+    List<RegionLoad> rLoads = new ArrayList<>(regions.size());
     RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
     RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
 
@@ -1636,7 +1636,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       if (request.getFamilyCount() == 0) {
         columnFamilies = region.getTableDesc().getFamiliesKeys();
       } else {
-        columnFamilies = new TreeSet<byte[]>(Bytes.BYTES_RAWCOMPARATOR);
+        columnFamilies = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
         for (ByteString cf: request.getFamilyList()) {
           columnFamilies.add(cf.toByteArray());
         }
@@ -1692,8 +1692,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
     final int regionCount = request.getOpenInfoCount();
-    final Map<TableName, HTableDescriptor> htds =
-        new HashMap<TableName, HTableDescriptor>(regionCount);
+    final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
     final boolean isBulkAssign = regionCount > 1;
     try {
       checkOpen();
@@ -1783,7 +1782,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             } else {
               // Remove stale recovery region from ZK when we open region not for recovering which
               // could happen when turn distributedLogReplay off from on.
-              List<String> tmpRegions = new ArrayList<String>();
+              List<String> tmpRegions = new ArrayList<>();
               tmpRegions.add(region.getEncodedName());
               ZKSplitLog.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(),
                 tmpRegions);
@@ -1914,7 +1913,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
             ? region.getCoprocessorHost()
             : null; // do not invoke coprocessors if this is a secondary region replica
-      List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<Pair<WALKey, WALEdit>>();
+      List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
 
       // Skip adding the edits to WAL if this is a secondary region replica
       boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
@@ -1935,8 +1934,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
               nonce,
               entry.getKey().getWriteTime());
         }
-        Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null :
-          new Pair<WALKey, WALEdit>();
+        Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
         List<WALSplitter.MutationReplay> edits = WALSplitter.getMutationsFromWALEntry(entry,
           cells, walEntry, durability);
         if (coprocessorHost != null) {
@@ -2132,11 +2130,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
       if (!request.hasBulkToken()) {
         // Old style bulk load. This will not be supported in future releases
-        List<Pair<byte[], String>> familyPaths =
-            new ArrayList<Pair<byte[], String>>(request.getFamilyPathCount());
+        List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
         for (FamilyPath familyPath : request.getFamilyPathList()) {
-          familyPaths.add(new Pair<byte[], String>(familyPath.getFamily().toByteArray(), familyPath
-              .getPath()));
+          familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath()));
         }
         if (region.getCoprocessorHost() != null) {
           bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
@@ -2317,7 +2313,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCallBack,
       RpcCallContext context) throws IOException {
     region.prepareGet(get);
-    List<Cell> results = new ArrayList<Cell>();
+    List<Cell> results = new ArrayList<>();
     boolean stale = region.getRegionInfo().getReplicaId() != 0;
     // pre-get CP hook
     if (region.getCoprocessorHost() != null) {
@@ -2789,7 +2785,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     // This is cells inside a row. Default size is 10 so if many versions or many cfs,
     // then we'll resize. Resizings show in profiler. Set it higher than 10. For now
     // arbitrary 32. TODO: keep record of general size of results being returned.
-    List<Cell> values = new ArrayList<Cell>(32);
+    List<Cell> values = new ArrayList<>(32);
     region.startRegionOperation(Operation.SCAN);
     try {
       int i = 0;

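The Pair<WALKey, WALEdit> hunk in the replay path is the subtlest change in
this commit: new Pair<>() takes no constructor arguments, so inside the
conditional expression the only source of inference is the assignment target,
and that only reaches through the ternary because Java 8 made conditionals
poly expressions. A hedged illustration with standard-library stand-ins (Pair
itself is an HBase utility class, so a HashMap takes its place here):

import java.util.HashMap;
import java.util.Map;

public class TernaryDiamondDemo {
  public static void main(String[] args) {
    boolean haveHost = args.length > 0;
    // Mirrors: walEntry = (coprocessorHost == null) ? null : new Pair<>();
    // Under Java 7 rules the no-arg diamond could not see the target type
    // through the conditional and this would typically not compile;
    // Java 8 target typing resolves it.
    Map<String, Long> counters = haveHost ? new HashMap<>() : null;
    System.out.println(counters == null ? "no host" : "size=" + counters.size());
  }
}
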
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 649273d..925e349 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -361,7 +361,7 @@ public class RegionCoprocessorHost
 
     // scan the table attributes for coprocessor load specifications
     // initialize the coprocessors
-    List<RegionEnvironment> configured = new ArrayList<RegionEnvironment>();
+    List<RegionEnvironment> configured = new ArrayList<>();
     for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf, 
         region.getTableDesc())) {
       // Load encompasses classloading and coprocessor initialization
@@ -405,7 +405,7 @@ public class RegionCoprocessorHost
       // remain in this map
       classData = (ConcurrentMap<String, Object>)sharedDataMap.get(implClass.getName());
       if (classData == null) {
-        classData = new ConcurrentHashMap<String, Object>();
+        classData = new ConcurrentHashMap<>();
         sharedDataMap.put(implClass.getName(), classData);
       }
     }