Posted to commits@geode.apache.org by kl...@apache.org on 2017/05/12 22:18:29 UTC

[26/51] [abbrv] [partial] geode git commit: GEODE-2632: change dependencies on GemFireCacheImpl to InternalCache
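
The pattern applied throughout this commit is type-level: fields, locals, parameters, and Javadoc
references declared as the concrete GemFireCacheImpl are retyped to the InternalCache interface,
and casts such as ((GemFireCacheImpl) getCache()) are dropped where the interface already exposes
the needed method. A minimal sketch of the shape of the change follows; CacheConsumer is a
hypothetical class, not part of this diff:

    import org.apache.geode.cache.query.QueryService;
    import org.apache.geode.internal.cache.InternalCache;

    // Hypothetical call site showing the before/after shape of this commit.
    class CacheConsumer {
      // before: private final GemFireCacheImpl cache; // pins callers to the implementation
      private final InternalCache cache; // after: only the interface is required

      CacheConsumer(InternalCache cache) {
        this.cache = cache;
      }

      QueryService queryService() {
        // getLocalQueryService() is assumed available on the interface, as the
        // PRQueryProcessor hunk below relies on after dropping its cast.
        return this.cache.getLocalQueryService();
      }
    }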

http://git-wip-us.apache.org/repos/asf/geode/blob/654d65b5/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
index 65ea728..6098d4b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
@@ -47,7 +47,6 @@ import org.apache.geode.internal.logging.log4j.LogMarker;
  * An oplog used for overflow-only regions. For regions that are persistent (i.e. they can be
  * recovered) see {@link Oplog}.
  * 
- * 
  * @since GemFire prPersistSprint2
  */
 class OverflowOplog implements CompactableOplog, Flushable {
@@ -63,10 +62,9 @@ class OverflowOplog implements CompactableOplog, Flushable {
   private volatile boolean closed;
 
   private final OplogFile crf = new OplogFile();
+
   private final ByteBuffer[] bbArray = new ByteBuffer[2];
 
-  /** preallocated space available for writing to* */
-  // volatile private long opLogSpace = 0L;
   /** The stats for this store */
   private final DiskStoreStats stats;
 
@@ -99,52 +97,9 @@ class OverflowOplog implements CompactableOplog, Flushable {
 
   private final OplogDiskEntry liveEntries = new OplogDiskEntry();
 
-  private static final ByteBuffer EMPTY = ByteBuffer.allocate(0);
-
-  // ///////////////////// Constructors ////////////////////////
-  // /**
-  // * Creates new <code>Oplog</code> for the given region.
-  // *
-  // * @param oplogId
-  // * int identifying the new oplog
-  // * @param dirHolder
-  // * The directory in which to create new Oplog
-  // *
-  // * @throws DiskAccessException
-  // * if the disk files can not be initialized
-  // */
-  // OverflowOplog(int oplogId, DiskStoreImpl parent, DirectoryHolder dirHolder) {
-  // this.oplogId = oplogId;
-  // this.parent = parent;
-  // this.dirHolder = dirHolder;
-  // this.opState = new OpState();
-  // long maxOplogSizeParam = this.parent.getMaxOplogSizeInBytes();
-  // long availableSpace = this.dirHolder.getAvailableSpace();
-  // if (availableSpace < maxOplogSizeParam) {
-  // this.maxOplogSize = availableSpace;
-  // } else {
-  // this.maxOplogSize = maxOplogSizeParam;
-  // }
-  // this.stats = this.parent.getStats();
-
-  // this.closed = false;
-  // String n = this.parent.getName();
-  // this.diskFile = new File(this.dirHolder.getDir(),
-  // "OVERFLOW"
-  // + n + "_" + oplogId);
-  // try {
-  // createCrf();
-  // }
-  // catch (IOException ex) {
-  // throw new
-  // DiskAccessException(LocalizedStrings.Oplog_FAILED_CREATING_OPERATION_LOG_BECAUSE_0.toLocalizedString(ex),
-  // this.parent);
-  // }
-  // }
-
   /**
-   * Asif: A copy constructor used for creating a new oplog based on the previous Oplog. This
-   * constructor is invoked only from the function switchOplog
+   * A copy constructor used for creating a new oplog based on the previous Oplog. This constructor
+   * is invoked only from the function switchOplog
    * 
    * @param oplogId integer identifying the new oplog
    * @param dirHolder The directory in which to create new Oplog
@@ -201,26 +156,15 @@ class OverflowOplog implements CompactableOplog, Flushable {
     try {
       olf.raf.setLength(this.maxOplogSize);
       olf.raf.seek(0);
-    } catch (IOException ioe) {
-      // @todo need a warning since this can impact perf.
+    } catch (IOException ignore) {
+      // TODO: need a warning since this can impact perf.
       // I don't think I need any of this. If setLength throws then
       // the file is still ok.
-      // raf.close();
-      // if (!this.opLogFile.delete() && this.opLogFile.exists()) {
-      // throw new
-      // DiskAccessException(LocalizedStrings.NewLBHTreeDiskRegion_COULD_NOT_DELETE__0_.toLocalizedString(this.opLogFile.getAbsolutePath()),
-      // this.owner);
-      // }
-      // f = new File(this.diskFile.getPath() + OPLOG_FILE_EXT);
-      // this.opLogFile = f;
-      // raf = new RandomAccessFile(f, "rw");
     }
   }
 
   /**
    * Creates the crf oplog file
-   * 
-   * @throws IOException
    */
   private void createCrf(OverflowOplog previous) throws IOException {
     File f = new File(this.diskFile.getPath() + CRF_FILE_EXT);
@@ -245,7 +189,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
       result = previous.consumeWriteBuf();
     }
     if (result == null) {
-      result = ByteBuffer.allocateDirect(Integer.getInteger("WRITE_BUF_SIZE", 32768).intValue());
+      result = ByteBuffer.allocateDirect(Integer.getInteger("WRITE_BUF_SIZE", 32768));
     }
     return result;
   }
@@ -266,20 +210,11 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * Flushes any pending writes to disk.
-   * 
-   * public final void flush() { forceFlush(); }
-   */
-
-  /**
   * Test method to be used only for testing purposes. Gets the underlying File object for the
   * Oplog. The Oplog class uses this File object to obtain the RandomAccessFile object. Before
   * returning the File object, the data present in the buffers of the RandomAccessFile object is
   * flushed. Otherwise, for Windows the actual file length does not match the File size obtained
   * from the File object
-   * 
-   * @throws IOException
-   * @throws SyncFailedException
    */
   File getOplogFile() throws SyncFailedException, IOException {
     synchronized (this.crf) {
@@ -305,7 +240,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
    *        present. @param faultingIn @param bitOnly boolean indicating whether to extract just the
    *        UserBit or UserBit with value @return BytesAndBits object wrapping the value & user bit
    */
-  public final BytesAndBits getBytesAndBits(DiskRegionView dr, DiskId id, boolean faultingIn,
+  public BytesAndBits getBytesAndBits(DiskRegionView dr, DiskId id, boolean faultingIn,
       boolean bitOnly) {
     OverflowOplog retryOplog = null;
     long offset = 0;
@@ -326,22 +261,19 @@ class OverflowOplog implements CompactableOplog, Flushable {
     BytesAndBits bb = null;
     long start = this.stats.startRead();
 
-    // Asif: If the offset happens to be -1, still it is possible that
+    // If the offset happens to be -1, still it is possible that
     // the data is present in the current oplog file.
     if (offset == -1) {
-      // Asif: Since it is given that a get operation has alreadty
+      // Since it is given that a get operation has already
       // taken a
       // lock on an entry, no put operation could have modified the
       // oplog ID
       // therefore synchronization is not needed
       // synchronized (id) {
-      // if (id.getOplogId() == this.oplogId) {
       offset = id.getOffsetInOplog();
-      // }
-      // }
     }
 
-    // Asif :If the current OpLog is not destroyed ( its opLogRaf file
+    // If the current OpLog is not destroyed (its opLogRaf file
     // is still open) we can retrieve the value from this oplog.
     try {
       bb = basicGet(dr, offset, bitOnly, id.getValueLength(), id.getUserBits());
@@ -351,22 +283,18 @@ class OverflowOplog implements CompactableOplog, Flushable {
           id), dae);
       throw dae;
     }
-    // Asif: If bb is still null then entry has been compacted to the Htree
+    // If bb is still null then entry has been compacted to the Htree
     // or in case of concurrent get & put, to a new OpLog (Concurrent Get
     // &
     // Put is not possible at this point).
-    // Asif: Since the compacter takes a lock on Entry as well as DiskId , the
+    // Since the compactor takes a lock on Entry as well as DiskId, the
     // situation below
     // will not be possible and hence commenting the code
-    /*
-     * if (bb == null) { // TODO: added by mitul, remove it later Assert.assertTrue(id.getOplogId()
-     * != this.oplogId);
-     */
 
     if (bb == null) {
       throw new EntryDestroyedException(
           LocalizedStrings.Oplog_NO_VALUE_WAS_FOUND_FOR_ENTRY_WITH_DISK_ID_0_ON_A_REGION_WITH_SYNCHRONOUS_WRITING_SET_TO_1
-              .toLocalizedString(new Object[] {id, Boolean.valueOf(dr.isSync())}));
+              .toLocalizedString(new Object[] {id, dr.isSync()}));
     }
     if (bitOnly) {
       dr.endRead(start, this.stats.endRead(start, 1), 1);
@@ -374,7 +302,6 @@ class OverflowOplog implements CompactableOplog, Flushable {
       dr.endRead(start, this.stats.endRead(start, bb.getBytes().length), bb.getBytes().length);
     }
     return bb;
-
   }
 
   /**
@@ -384,17 +311,14 @@ class OverflowOplog implements CompactableOplog, Flushable {
    * HTree with the oplog being destroyed
    * 
    * @param id A DiskId object for which the value on disk will be fetched
-   * 
    */
-  public final BytesAndBits getNoBuffer(DiskRegion dr, DiskId id) {
+  public BytesAndBits getNoBuffer(DiskRegion dr, DiskId id) {
     if (logger.isTraceEnabled()) {
       logger.trace("Oplog::getNoBuffer:Before invoking Oplog.basicGet for DiskID ={}", id);
     }
 
     try {
-      BytesAndBits bb =
-          basicGet(dr, id.getOffsetInOplog(), false, id.getValueLength(), id.getUserBits());
-      return bb;
+      return basicGet(dr, id.getOffsetInOplog(), false, id.getValueLength(), id.getUserBits());
     } catch (DiskAccessException dae) {
       logger.error(LocalizedMessage.create(
           LocalizedStrings.Oplog_OPLOGGETNOBUFFEREXCEPTION_IN_RETRIEVING_VALUE_FROM_DISK_FOR_DISKID_0,
@@ -415,7 +339,6 @@ class OverflowOplog implements CompactableOplog, Flushable {
   /**
    * Call this when the cache is closed or region is destroyed. Deletes the lock files and if it is
    * Overflow only, deletes the oplog file as well
-   * 
    */
   public void close() {
     if (this.closed) {
@@ -525,21 +448,16 @@ class OverflowOplog implements CompactableOplog, Flushable {
   /**
    * Modifies a key/value pair from a region entry on disk. Updates all of the necessary
    * {@linkplain DiskStoreStats statistics} and invokes basicModify
+   * <p>
+   * Modified the code so as to reuse the already created ByteBuffer during transition, minimizing
+   * the synchronization and allowing multiple put operations for different entries to proceed
+   * concurrently in asynch mode
    * 
    * @param entry DiskEntry object representing the current Entry
    * @param value byte array representing the value
-   * 
-   * @throws DiskAccessException
-   * @throws IllegalStateException
-   */
-  /*
-   * Asif: Modified the code so as to reuse the already created ByteBuffer during transition.
-   * Minimizing the synchronization allowing multiple put operations for different entries to
-   * proceed concurrently for asynch mode
-   * 
    * @return true if modify was done; false if this file did not have room
    */
-  public final boolean modify(DiskRegion dr, DiskEntry entry, ValueWrapper value, boolean async) {
+  public boolean modify(DiskRegion dr, DiskEntry entry, ValueWrapper value, boolean async) {
     try {
       byte userBits = calcUserBits(value);
       return basicModify(entry, value, userBits, async);
@@ -557,7 +475,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
     }
   }
 
-  public final boolean copyForwardForOverflowCompact(DiskEntry entry, byte[] value, int length,
+  public boolean copyForwardForOverflowCompact(DiskEntry entry, byte[] value, int length,
       byte userBits) {
     try {
       ValueWrapper vw = new DiskEntry.Helper.CompactorValueWrapper(value, length);
@@ -578,15 +496,13 @@ class OverflowOplog implements CompactableOplog, Flushable {
 
 
   /**
-   * Asif: A helper function which identifies whether to modify the entry in the current oplog or to
-   * make the switch to the next oplog. This function enables us to reuse the byte buffer which got
+   * A helper function which identifies whether to modify the entry in the current oplog or to make
+   * the switch to the next oplog. This function enables us to reuse the byte buffer which got
   * created for an oplog which no longer permits us to use itself. It will also take care of
    * compaction if required
    * 
    * @param entry DiskEntry object representing the current Entry
    * @return true if modify was done; false if this file did not have room
-   * @throws IOException
-   * @throws InterruptedException
    */
   private boolean basicModify(DiskEntry entry, ValueWrapper value, byte userBits, boolean async)
       throws IOException, InterruptedException {
@@ -654,7 +570,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
    * 
    * @param entry DiskEntry object on which remove operation is called
    */
-  public final void remove(DiskRegion dr, DiskEntry entry) {
+  public void remove(DiskRegion dr, DiskEntry entry) {
     try {
       basicRemove(dr, entry);
     } catch (IOException ex) {
@@ -672,14 +588,11 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * 
-   * Asif: A helper function which identifies whether to record a removal of entry in the current
-   * oplog or to make the switch to the next oplog. This function enables us to reuse the byte
-   * buffer which got created for an oplog which no longer permits us to use itself.
+   * A helper function which identifies whether to record a removal of an entry in the current oplog or
+   * to make the switch to the next oplog. This function enables us to reuse the byte buffer which
+   * got created for an oplog which no longer permits us to use itself.
    * 
    * @param entry DiskEntry object representing the current Entry
-   * @throws IOException
-   * @throws InterruptedException
    */
   private void basicRemove(DiskRegion dr, DiskEntry entry)
       throws IOException, InterruptedException {
@@ -700,23 +613,17 @@ class OverflowOplog implements CompactableOplog, Flushable {
     }
   }
 
-
-  // /**
-  // * This is only used for an assertion check.
-  // */
-  // private long lastWritePos = -1;
-
   /**
    * test hook
    */
-  public final ByteBuffer getWriteBuf() {
+  public ByteBuffer getWriteBuf() {
     return this.crf.writeBuf;
   }
 
   private static final int MAX_CHANNEL_RETRIES = 5;
 
   @Override
-  public final void flush() throws IOException {
+  public void flush() throws IOException {
     final OplogFile olf = this.crf;
     synchronized (olf) {
       if (olf.RAFClosed) {
@@ -780,7 +687,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   @Override
-  public final void flush(ByteBuffer b1, ByteBuffer b2) throws IOException {
+  public void flush(ByteBuffer b1, ByteBuffer b2) throws IOException {
     final OplogFile olf = this.crf;
     synchronized (olf) {
       if (olf.RAFClosed) {
@@ -809,7 +716,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
     }
   }
 
-  public final void flushAll() {
+  public void flushAll() {
     try {
       flush();
     } catch (IOException ex) {
@@ -820,13 +727,13 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * Asif: Since the ByteBuffer being writen to can have additional bytes which are used for
-   * extending the size of the file, it is necessary that the ByteBuffer provided should have limit
-   * which is set to the position till which it contains the actual bytes. If the mode is synched
-   * write then only we will write up to the capacity & opLogSpace variable have any meaning. For
-   * asynch mode it will be zero. Also this method must be synchronized on the file , whether we use
-   * synch or asynch write because the fault in operations can clash with the asynch writing. Write
-   * the specified bytes to the oplog. Note that since extending a file is expensive this code will
+   * Since the ByteBuffer being written to can have additional bytes which are used for extending
+   * the size of the file, it is necessary that the ByteBuffer provided should have a limit set to
+   * the position up to which it contains the actual bytes. Only in synched write mode will we
+   * write up to the capacity & will the opLogSpace variable have any meaning; for asynch mode it
+   * will be zero. Also this method must be synchronized on the file, whether we use synch or
+   * asynch write, because the fault in operations can clash with the asynch writing. Write the
+   * specified bytes to the oplog. Note that since extending a file is expensive this code will
    * possibly write OPLOG_EXTEND_SIZE zero bytes to reduce the number of times the file is extended.
    * 
    *
@@ -843,7 +750,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
         Assert.assertTrue(false, toString() + " for store " + this.parent.getName()
             + " has been closed for synch mode while writing is going on. This should not happen");
       }
-      // Asif : It is assumed that the file pointer is already at the
+      // It is assumed that the file pointer is already at the
       // appropriate position in the file so as to allow writing at the end.
       // Any fault in operations will set the pointer back to the write location.
       // Also it is only in case of synch writing, we are writing more
@@ -892,30 +799,24 @@ class OverflowOplog implements CompactableOplog, Flushable {
   private BytesAndBits attemptGet(DiskRegionView dr, long offsetInOplog, int valueLength,
       byte userBits) throws IOException {
     synchronized (this.crf) {
-      // if (this.closed || this.deleted.get()) {
-      // throw new DiskAccessException("attempting get on "
-      // + (this.deleted.get() ? "destroyed" : "closed")
-      // + " oplog #" + getOplogId(), this.owner);
-      // }
-      final long readPosition = offsetInOplog;
-      assert readPosition >= 0;
+      assert offsetInOplog >= 0;
       RandomAccessFile myRAF = this.crf.raf;
       BytesAndBits bb = null;
       long writePosition = 0;
       if (!this.doneAppending) {
         writePosition = myRAF.getFilePointer();
-        bb = attemptWriteBufferGet(writePosition, readPosition, valueLength, userBits);
+        bb = attemptWriteBufferGet(writePosition, offsetInOplog, valueLength, userBits);
         if (bb == null) {
           if (/*
                * !getParent().isSync() since compactor groups writes &&
-               */ (readPosition + valueLength) > this.crf.bytesFlushed && !this.closed) {
+               */ (offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
             flushAll(); // fix for bug 41205
             writePosition = myRAF.getFilePointer();
           }
         }
       }
       if (bb == null) {
-        myRAF.seek(readPosition);
+        myRAF.seek(offsetInOplog);
         try {
           this.stats.incOplogSeeks();
           byte[] valueBytes = new byte[valueLength];
@@ -965,7 +866,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * Asif: Extracts the Value byte array & UserBit from the OpLog
+   * Extracts the Value byte array & UserBit from the OpLog
    * 
    * @param offsetInOplog The starting position from which to read the data in the opLog
    * @param bitOnly boolean indicating whether the value needs to be extracted along with the
@@ -996,7 +897,8 @@ class OverflowOplog implements CompactableOplog, Flushable {
           try {
             bb = attemptGet(dr, offsetInOplog, valueLength, userBits);
             break;
-          } catch (InterruptedIOException e) { // bug 39756
+          } catch (InterruptedIOException ignore) {
+            // bug 39756
             // ignore, we'll clear and retry.
           } finally {
             if (interrupted) {
@@ -1007,10 +909,8 @@ class OverflowOplog implements CompactableOplog, Flushable {
       } catch (IOException ex) {
         throw new DiskAccessException(
             LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOGID_1_OFFSET_BEING_READ_2_CURRENT_OPLOG_SIZE_3_ACTUAL_FILE_SIZE_4_IS_ASYNCH_MODE_5_IS_ASYNCH_WRITER_ALIVE_6
-                .toLocalizedString(new Object[] {this.diskFile.getPath(),
-                    Long.valueOf(this.oplogId), Long.valueOf(offsetInOplog),
-                    Long.valueOf(this.crf.currSize), Long.valueOf(this.crf.bytesFlushed),
-                    Boolean.valueOf(!dr.isSync()), Boolean.valueOf(false)}),
+                .toLocalizedString(this.diskFile.getPath(), (long) this.oplogId, offsetInOplog,
+                    this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), false),
             ex, dr.getName());
       } catch (IllegalStateException ex) {
         checkClosed();
@@ -1082,6 +982,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
    * The HighWaterMark of recentValues.
    */
   private final AtomicLong totalCount = new AtomicLong(0);
+
   /**
    * The number of records in this oplog that contain the most recent value of the entry.
    */
@@ -1146,8 +1047,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
         tlc = 0;
       }
       double rv = tlc;
-      double rvHWM = rvHWMtmp;
-      if (((rv / rvHWM) * 100) <= parent.getCompactionThreshold()) {
+      if (((rv / (double) rvHWMtmp) * 100) <= parent.getCompactionThreshold()) {
         return true;
       }
     } else {
@@ -1199,10 +1099,6 @@ class OverflowOplog implements CompactableOplog, Flushable {
     getOplogSet().addOverflowToBeCompacted(this);
   }
 
-  private GemFireCacheImpl getGemFireCache() {
-    return this.parent.getCache();
-  }
-
 
   long testGetOplogFileLength() throws IOException {
     long result = 0;
@@ -1212,29 +1108,10 @@ class OverflowOplog implements CompactableOplog, Flushable {
     return result;
   }
 
-  private final OplogFile getOLF() {
+  private OplogFile getOLF() {
     return this.crf;
   }
 
-  // // Comparable code //
-  // public int compareTo(Oplog o) {
-  // return getOplogId() - o.getOplogId();
-  // }
-  // public boolean equals(Object o) {
-  // if (o instanceof Oplog) {
-  // return compareTo((Oplog)o) == 0;
-  // } else {
-  // return false;
-  // }
-  // }
-  // public int hashCode() {
-  // return getOplogId();
-  // }
-
-  // //////// Methods used during recovery //////////////
-
-  // ////////////////////Inner Classes //////////////////////
-
   private static class OplogFile {
     public File f;
     public RandomAccessFile raf;
@@ -1251,14 +1128,17 @@ class OverflowOplog implements CompactableOplog, Flushable {
    */
   private class OpState {
     private byte userBits;
+
     /**
      * How many bytes it will be when serialized
      */
     private int size;
+
     private boolean needsValue;
+
     private ValueWrapper value;
 
-    public final int getSize() {
+    public int getSize() {
       return this.size;
     }
 
@@ -1269,7 +1149,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
       this.value = null;
     }
 
-    private final void write(ValueWrapper vw) throws IOException {
+    private void write(ValueWrapper vw) throws IOException {
       vw.sendTo(getOLF().writeBuf, OverflowOplog.this);
     }
 
@@ -1329,7 +1209,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
     this.compacting = true;
   }
 
-  private final static ThreadLocal isCompactorThread = new ThreadLocal();
+  private static final ThreadLocal isCompactorThread = new ThreadLocal();
 
   private boolean calledByCompactorThread() {
     if (!this.compacting)
@@ -1361,7 +1241,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
           handleNoLiveValues();
           return 0;
         }
-        // Asif:Start with a fresh wrapper on every compaction so that
+        // Start with a fresh wrapper on every compaction so that
         // if previous run used some high memory byte array which was
         // exceptional, it gets garbage collected.
         long opStart = getStats().getStatTime();
@@ -1433,7 +1313,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
             totalCount++;
             getStats().endCompactionUpdate(opStart);
             opStart = getStats().getStatTime();
-            // Asif: Check if the value byte array happens to be any of the constant
+            // Check if the value byte array happens to be any of the constant
             // static byte arrays or references the value byte array of underlying RegionEntry.
             // If so for preventing data corruption across regions
             // ( in case of static byte arrays) & for RegionEntry,
@@ -1459,10 +1339,10 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * Asif:This function retrieves the value for an entry being compacted subject to entry
-   * referencing the oplog being compacted. Attempt is made to retrieve the value from in memory ,
-   * if available, else from asynch buffers ( if asynch mode is enabled), else from the Oplog being
-   * compacted. It is invoked from switchOplog as well as OplogCompactor's compact function.
+   * This function retrieves the value for an entry being compacted subject to entry referencing the
+   * oplog being compacted. Attempt is made to retrieve the value from memory, if available,
+   * else from asynch buffers (if asynch mode is enabled), else from the Oplog being compacted. It
+   * is invoked from switchOplog as well as OplogCompactor's compact function.
    * 
    * @param entry DiskEntry being compacted referencing the Oplog being compacted
    * @param wrapper Object of type BytesAndBitsForCompactor. The data if found is set in the wrapper
@@ -1477,7 +1357,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
     long oplogOffset = did.getOffsetInOplog();
     boolean foundData = false;
     if (entry.isValueNull()) {
-      // Asif: If the mode is synch it is guaranteed to be present in the disk
+      // If the mode is synch it is guaranteed to be present in the disk
       foundData = basicGetForCompactor(oplogOffset, false, did.getValueLength(), did.getUserBits(),
           wrapper);
       // after we have done the get do one more check to see if the
@@ -1518,8 +1398,8 @@ class OverflowOplog implements CompactableOplog, Flushable {
   }
 
   /**
-   * Asif: Extracts the Value byte array & UserBit from the OpLog and inserts it in the wrapper
-   * Object of type BytesAndBitsForCompactor which is passed
+   * Extracts the Value byte array & UserBit from the OpLog and inserts it in the wrapper Object of
+   * type BytesAndBitsForCompactor which is passed in
    * 
    * @param offsetInOplog The starting position from which to read the data in the opLog
    * @param bitOnly boolean indicating whether the value needs to be extracted along with the
@@ -1547,33 +1427,26 @@ class OverflowOplog implements CompactableOplog, Flushable {
     } else {
       try {
         synchronized (this.crf) {
-          final long readPosition = offsetInOplog;
           if (/*
                * !getParent().isSync() since compactor groups writes &&
-               */ (readPosition + valueLength) > this.crf.bytesFlushed && !this.closed) {
+               */ (offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
             flushAll(); // fix for bug 41205
           }
           final long writePosition =
               (this.doneAppending) ? this.crf.bytesFlushed : this.crf.raf.getFilePointer();
-          if ((readPosition + valueLength) > writePosition) {
+          if ((offsetInOplog + valueLength) > writePosition) {
             throw new DiskAccessException(
                 LocalizedStrings.Oplog_TRIED_TO_SEEK_TO_0_BUT_THE_FILE_LENGTH_IS_1_OPLOG_FILE_OBJECT_USED_FOR_READING_2
-                    .toLocalizedString(
-                        new Object[] {readPosition + valueLength, writePosition, this.crf.raf}),
+                    .toLocalizedString(offsetInOplog + valueLength, writePosition, this.crf.raf),
                 getParent().getName());
-          } else if (readPosition < 0) {
+          } else if (offsetInOplog < 0) {
             throw new DiskAccessException(
                 LocalizedStrings.Oplog_CANNOT_FIND_RECORD_0_WHEN_READING_FROM_1.toLocalizedString(
-                    new Object[] {Long.valueOf(offsetInOplog), this.diskFile.getPath()}),
+                    offsetInOplog, this.diskFile.getPath()),
                 getParent().getName());
           }
-          // if (this.closed || this.deleted.get()) {
-          // throw new DiskAccessException("attempting get on "
-          // + (this.deleted.get() ? "destroyed" : "closed")
-          // + " oplog #" + getOplogId(), this.owner);
-          // }
           try {
-            this.crf.raf.seek(readPosition);
+            this.crf.raf.seek(offsetInOplog);
             this.stats.incOplogSeeks();
             byte[] valueBytes = null;
             if (wrapper.getBytes().length < valueLength) {
@@ -1601,10 +1474,8 @@ class OverflowOplog implements CompactableOplog, Flushable {
       } catch (IOException ex) {
         throw new DiskAccessException(
             LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOG_DETAILS_1_2_3_4_5_6
-                .toLocalizedString(new Object[] {this.diskFile.getPath(),
-                    Long.valueOf(this.oplogId), Long.valueOf(offsetInOplog),
-                    Long.valueOf(this.crf.currSize), Long.valueOf(this.crf.bytesFlushed),
-                    Boolean.valueOf(/* !dr.isSync() @todo */false), Boolean.valueOf(false)}),
+                .toLocalizedString(this.diskFile.getPath(), (long) this.oplogId, offsetInOplog,
+                    this.crf.currSize, this.crf.bytesFlushed, false, false),
             ex, getParent().getName());
 
       } catch (IllegalStateException ex) {

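Several hunks above replace explicit wrapper construction (Long.valueOf, Boolean.valueOf,
new Object[] {...}) in toLocalizedString calls with bare values, relying on varargs and
autoboxing. The two call styles are equivalent; a small self-contained illustration, with
String.format standing in for toLocalizedString (whose exact overloads are not shown in
this diff):

    public class AutoboxDemo {
      public static void main(String[] args) {
        long oplogId = 2;
        long offset = 8192;

        // Old style: wrappers packed into an explicit Object[].
        String a = String.format("oplog %d offset %d",
            new Object[] {Long.valueOf(oplogId), Long.valueOf(offset)});

        // New style: the compiler autoboxes and builds the varargs array itself.
        String b = String.format("oplog %d offset %d", oplogId, offset);

        System.out.println(a.equals(b)); // true
      }
    }
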
http://git-wip-us.apache.org/repos/asf/geode/blob/654d65b5/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
index 6fc4ba0..f8e2108 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
@@ -22,7 +22,6 @@ import org.apache.geode.cache.PartitionedRegionStorageException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.persistence.PartitionOfflineException;
-import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.DistributionConfig;
@@ -1394,7 +1393,7 @@ public class PRHARedundancyProvider {
       chosen = 0;
     } else {
       // Pick one (at random)
-      chosen = PartitionedRegion.rand.nextInt(bestStores.size());
+      chosen = PartitionedRegion.RANDOM.nextInt(bestStores.size());
     }
     DataStoreBuckets aDataStore = bestStores.get(chosen);
     return aDataStore.memberId;
@@ -1524,7 +1523,7 @@ public class PRHARedundancyProvider {
   public void scheduleRedundancyRecovery(Object failedMemId) {
 
     final boolean isStartup = failedMemId == null ? true : false;
-    final GemFireCacheImpl cache = this.prRegion.getCache();
+    final InternalCache cache = this.prRegion.getCache();
     final int redundantCopies = PRHARedundancyProvider.this.prRegion.getRedundantCopies();
     final long delay;
     final boolean movePrimaries;

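The rand -> RANDOM rename above follows the naming convention for static final constants; the
selection logic is unchanged: one candidate store is picked uniformly at random. A standalone
equivalent, with the DataStoreBuckets list stubbed by strings:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class PickRandomDemo {
      private static final Random RANDOM = new Random();

      public static void main(String[] args) {
        List<String> bestStores = Arrays.asList("member-a", "member-b", "member-c");
        // nextInt(n) returns a value in [0, n), so every index is a valid pick.
        String chosen = bestStores.get(RANDOM.nextInt(bestStores.size()));
        System.out.println(chosen);
      }
    }
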
http://git-wip-us.apache.org/repos/asf/geode/blob/654d65b5/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
index 70d4e2c..0745975 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
@@ -14,13 +14,42 @@
  */
 package org.apache.geode.internal.cache;
 
+import static java.lang.Integer.*;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.InternalGemFireException;
 import org.apache.geode.cache.CacheRuntimeException;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.query.QueryException;
 import org.apache.geode.cache.query.QueryInvocationTargetException;
 import org.apache.geode.cache.query.SelectResults;
-import org.apache.geode.cache.query.internal.*;
+import org.apache.geode.cache.query.internal.CompiledSelect;
+import org.apache.geode.cache.query.internal.DefaultQuery;
+import org.apache.geode.cache.query.internal.ExecutionContext;
+import org.apache.geode.cache.query.internal.IndexTrackingQueryObserver;
+import org.apache.geode.cache.query.internal.NWayMergeResults;
+import org.apache.geode.cache.query.internal.QueryExecutionContext;
+import org.apache.geode.cache.query.internal.QueryMonitor;
+import org.apache.geode.cache.query.internal.QueryObserver;
+import org.apache.geode.cache.query.internal.QueryObserverHolder;
 import org.apache.geode.cache.query.types.ObjectType;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.Assert;
@@ -30,32 +59,22 @@ import org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator.PRQueryRe
 import org.apache.geode.internal.cache.execute.BucketMovedException;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.logging.log4j.Logger;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.*;
-
 
 /**
  * This class takes the responsibility of executing the query on a data store for the buckets
- * specified in bucketList. It contains a <code>PRQueryExecutor</code> thread-pool executor that
- * takes a <code>Callable</code> task identified by <code>PartitionedRegion</code>, queryString and
- * bucketId.
+ * specified in bucketList. It contains a {@code PRQueryExecutor} thread-pool executor that takes a
+ * {@code Callable} task identified by {@code PartitionedRegion}, queryString and bucketId.
  * 
  * The QueryTasks add results directly to a results queue. The BucketQueryResult is used not only
  * to indicate completion, but also holds an exception if one occurred while processing a query.
- *
  */
 public class PRQueryProcessor {
   private static final Logger logger = LogService.getLogger();
 
   final static int BUCKET_QUERY_TIMEOUT = 60;
 
-  public final static int NUM_THREADS = Integer
-      .getInteger(DistributionConfig.GEMFIRE_PREFIX + "PRQueryProcessor.numThreads", 1).intValue();
+  public final static int NUM_THREADS =
+      getInteger(DistributionConfig.GEMFIRE_PREFIX + "PRQueryProcessor.numThreads", 1);
 
   /* For Test purpose */
   public static int TEST_NUM_THREADS = 0;
@@ -69,14 +88,13 @@ public class PRQueryProcessor {
   private volatile ObjectType resultType = null;
 
   private boolean isIndexUsedForLocalQuery = false;
-  // private List _failedBuckets;
 
   public PRQueryProcessor(PartitionedRegionDataStore prDS, DefaultQuery query, Object[] parameters,
       List<Integer> buckets) {
     Assert.assertTrue(!buckets.isEmpty(), "bucket list can not be empty. ");
     this._prds = prDS;
     this._bucketsToQuery = buckets;
-    ((GemFireCacheImpl) prDS.partitionedRegion.getCache()).getLocalQueryService();
+    prDS.partitionedRegion.getCache().getLocalQueryService();
     this.query = query;
     this.parameters = parameters;
     PRQueryExecutor.initializeExecutorService();
@@ -104,7 +122,6 @@ public class PRQueryProcessor {
    * Executes a pre-compiled query on a data store. Adds result objects to resultQueue
    * 
    * @return boolean true if the result is a struct type
-   * @throws QueryException
    * @throws ForceReattemptException if query should be tried again
    */
   public boolean executeQuery(Collection<Collection> resultCollector)
@@ -115,7 +132,7 @@ public class PRQueryProcessor {
     // ((IndexTrackingQueryObserver)observer).setIndexInfo(resultCollector.getIndexInfoMap());
     // }
 
-    if (NUM_THREADS > 1 || this.TEST_NUM_THREADS > 1) {
+    if (NUM_THREADS > 1 || TEST_NUM_THREADS > 1) {
       executeWithThreadPool(resultCollector);
     } else {
       executeSequentially(resultCollector, this._bucketsToQuery);
@@ -139,7 +156,6 @@ public class PRQueryProcessor {
       try {
         futures = execService.invokeAll(callableTasks, 300, TimeUnit.SECONDS);
       } catch (RejectedExecutionException rejectedExecutionEx) {
-        // this._prds.partitionedRegion.checkReadiness();
         throw rejectedExecutionEx;
       }
 
@@ -166,7 +182,7 @@ public class PRQueryProcessor {
           } catch (TimeoutException e) {
             throw new InternalGemFireException(
                 LocalizedStrings.PRQueryProcessor_TIMED_OUT_WHILE_EXECUTING_QUERY_TIME_EXCEEDED_0
-                    .toLocalizedString(Integer.valueOf(BUCKET_QUERY_TIMEOUT)),
+                    .toLocalizedString(BUCKET_QUERY_TIMEOUT),
                 e);
           } catch (ExecutionException ee) {
             Throwable cause = ee.getCause();
@@ -217,8 +233,8 @@ public class PRQueryProcessor {
         if (pr.isLocallyDestroyed || pr.isClosed) {
           throw new RegionDestroyedException("PR destroyed during query", pr.getFullPath());
         } else {
-          throw new ForceReattemptException("Bucket id " + pr.bucketStringForLogs(bId.intValue())
-              + " not found on VM " + pr.getMyId());
+          throw new ForceReattemptException(
+              "Bucket id " + pr.bucketStringForLogs(bId) + " not found on VM " + pr.getMyId());
         }
       }
       bukRegion.waitForData();
@@ -254,7 +270,7 @@ public class PRQueryProcessor {
                 // Avoid if query is distinct as this Integer could be a region value.
                 if (!query.getSimpleSelect().isDistinct() && query.getSimpleSelect().isCount()
                     && r instanceof Integer) {
-                  if (((Integer) r).intValue() != 0) {
+                  if ((Integer) r != 0) {
                     rq.put(r);
                   }
                 } else {
@@ -268,7 +284,7 @@ public class PRQueryProcessor {
               }
             }
           }
-          rq.put(new EndOfBucket(bId.intValue()));
+          rq.put(new EndOfBucket(bId));
           this.incNumBucketsProcessed();
           return; // success
         }
@@ -298,8 +314,8 @@ public class PRQueryProcessor {
         throw new RegionDestroyedException("PR destroyed during query", pr.getFullPath());
       }
       pr.checkReadiness();
-      throw new ForceReattemptException("Bucket id " + pr.bucketStringForLogs(bId.intValue())
-          + " not found on VM " + pr.getMyId());
+      throw new ForceReattemptException(
+          "Bucket id " + pr.bucketStringForLogs(bId) + " not found on VM " + pr.getMyId());
     }
   }
 
@@ -342,9 +358,8 @@ public class PRQueryProcessor {
       }
     }
 
-    NWayMergeResults mergedResults = new NWayMergeResults(sortedResults, cs.isDistinct(), limit,
-        cs.getOrderByAttrs(), context, cs.getElementTypeForOrderByQueries());
-    return mergedResults;
+    return new NWayMergeResults(sortedResults, cs.isDistinct(), limit, cs.getOrderByAttrs(),
+        context, cs.getElementTypeForOrderByQueries());
 
   }
 
@@ -367,15 +382,10 @@ public class PRQueryProcessor {
       Object results = query.executeUsingContext(context);
 
       synchronized (resultCollector) {
-        // TODO:Asif: In what situation would the results object itself be undefined?
+        // TODO: In what situation would the results object itself be undefined?
        // The elements of the results can be undefined, but not the resultset itself
-        /*
-         * if (results == QueryService.UNDEFINED) {
-         * resultCollector.add(Collections.singleton(results)); } else {
-         */
         this.resultType = ((SelectResults) results).getCollectionType().getElementType();
-        resultCollector.add((SelectResults) results);
-        // }
+        resultCollector.add((Collection) results);
       }
       isIndexUsedForLocalQuery = ((QueryExecutionContext) context).isIndexUsed();
 
@@ -435,7 +445,7 @@ public class PRQueryProcessor {
 
     /**
      * Closes the executor service. This is called from
-     * {@link PartitionedRegion#afterRegionsClosedByCacheClose(GemFireCacheImpl)}
+     * {@link PartitionedRegion#afterRegionsClosedByCacheClose(InternalCache)}
      */
     static synchronized void shutdown() {
       if (execService != null) {
@@ -541,8 +551,7 @@ public class PRQueryProcessor {
           // ((IndexTrackingQueryObserver)observer).setIndexInfo(resultColl.getIndexInfoMap());
         }
 
-        final Integer bId = Integer.valueOf(this._bucketId);
-        List<Integer> bucketList = Collections.singletonList(bId);
+        List<Integer> bucketList = Collections.singletonList(this._bucketId);
         ExecutionContext context =
             new QueryExecutionContext(this.parameters, pr.getCache(), this.query);
         context.setBucketList(bucketList);
@@ -571,11 +580,6 @@ public class PRQueryProcessor {
       private Exception _ex = null;
       public boolean retry = false;
 
-      /**
-       * Constructor
-       * 
-       * @param bukId
-       */
       public BucketQueryResult(int bukId) {
         this._buk = bukId;
       }
@@ -593,7 +597,7 @@ public class PRQueryProcessor {
       }
 
       public Integer getBucketId() {
-        return Integer.valueOf(this._buk);
+        return valueOf(this._buk);
       }
 
       public boolean isReattemptNeeded() {

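NUM_THREADS above now reads its system property through the statically imported
Integer.getInteger, which returns the supplied default when the property is unset, and also
when it is set to something unparsable. A minimal demonstration; the property key here is
illustrative, not the real gemfire.PRQueryProcessor.numThreads key used in the diff:

    import static java.lang.Integer.getInteger;

    public class GetIntegerDemo {
      public static void main(String[] args) {
        // Unset property: the supplied default (1) is returned.
        System.out.println(getInteger("demo.numThreads", 1)); // 1

        System.setProperty("demo.numThreads", "4");
        System.out.println(getInteger("demo.numThreads", 1)); // 4

        // Unparsable values also fall back to the default rather than throwing.
        System.setProperty("demo.numThreads", "not-a-number");
        System.out.println(getInteger("demo.numThreads", 1)); // 1
      }
    }
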
http://git-wip-us.apache.org/repos/asf/geode/blob/654d65b5/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionAttributesImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionAttributesImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionAttributesImpl.java
index a25d4ca..161562a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionAttributesImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionAttributesImpl.java
@@ -12,9 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-/**
- * 
- */
 package org.apache.geode.internal.cache;
 
 import java.io.DataInput;
@@ -182,13 +179,15 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
   }
 
   /**
-   * Constructs an instance of <code>PartitionAttributes</code> with default settings.
+   * Constructs an instance of {@code PartitionAttributes} with default settings.
    * 
    * @see PartitionAttributesFactory
    */
-  public PartitionAttributesImpl() {}
-
+  public PartitionAttributesImpl() {
+    // do nothing
+  }
 
+  @Override
   public PartitionResolver getPartitionResolver() {
     return this.partitionResolver;
   }
@@ -211,31 +210,22 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     this.hasPartitionListeners = true;
   }
 
-  // public ExpirationAttributes getEntryTimeToLive()
-  // {
-  // return new ExpirationAttributes(this.entryTimeToLiveExpiration.getTimeout(),
-  // this.entryTimeToLiveExpiration.getAction());
-  // }
-  //
-  // public ExpirationAttributes getEntryIdleTimeout()
-  // {
-  // return new ExpirationAttributes(this.entryIdleTimeoutExpiration.getTimeout(),
-  // this.entryIdleTimeoutExpiration.getAction());
-  // }
-
+  @Override
   public int getRedundantCopies() {
     return this.redundancy;
   }
 
+  @Override
   public int getTotalNumBuckets() {
     return this.totalNumBuckets;
   }
 
-  // deprecated method
+  @Override
   public long getTotalSize() {
     return this.getTotalMaxMemory();
   }
 
+  @Override
   public long getTotalMaxMemory() {
     return this.totalMaxMemory;
   }
@@ -253,10 +243,12 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
    *         DistributedSystem has not yet been created)
    * @see #getLocalMaxMemoryForValidation()
    */
+  @Override
   public int getLocalMaxMemory() {
     if (this.offHeap && !this.localMaxMemoryExists) {
       int value = computeOffHeapLocalMaxMemory();
-      if (this.localMaxMemoryExists) { // real value now exists so set it and return
+      if (this.localMaxMemoryExists) {
+        // real value now exists so set it and return
         this.localMaxMemory = value;
       }
     }
@@ -269,8 +261,8 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
    *         DistributedSystem has not yet been created)
    */
   private void checkLocalMaxMemoryExists() {
-    if (this.offHeap && !this.localMaxMemoryExists) { // real value does NOT yet exist so throw
-                                                      // IllegalStateException
+    if (this.offHeap && !this.localMaxMemoryExists) {
+      // real value does NOT yet exist so throw IllegalStateException
       throw new IllegalStateException(
           "Attempting to use localMaxMemory for off-heap but value is not yet known (default value is equal to off-heap-memory-size)");
     }
@@ -295,32 +287,39 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     return this.localMaxMemory;
   }
 
+  @Override
   public String getColocatedWith() {
     return this.colocatedRegionName;
   }
 
+  @Override
   public Properties getLocalProperties() {
     return this.localProperties;
   }
 
+  @Override
   public Properties getGlobalProperties() {
     return this.globalProperties;
   }
 
+  @Override
   public long getStartupRecoveryDelay() {
     return startupRecoveryDelay;
   }
 
+  @Override
   public long getRecoveryDelay() {
     return recoveryDelay;
   }
 
+  @Override
   public List<FixedPartitionAttributesImpl> getFixedPartitionAttributes() {
     return this.fixedPAttrs;
   }
 
   private static final PartitionListener[] EMPTY_PARTITION_LISTENERS = new PartitionListener[0];
 
+  @Override
   public PartitionListener[] getPartitionListeners() {
     ArrayList<PartitionListener> listeners = this.partitionListeners;
     if (listeners == null) {
@@ -349,7 +348,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
         copy.partitionListeners = new ArrayList<PartitionListener>(copy.partitionListeners);
       }
       return copy;
-    } catch (CloneNotSupportedException e) {
+    } catch (CloneNotSupportedException ignore) {
       throw new InternalGemFireError(
           LocalizedStrings.PartitionAttributesImpl_CLONENOTSUPPORTEDEXCEPTION_THROWN_IN_CLASS_THAT_IMPLEMENTS_CLONEABLE
               .toLocalizedString());
@@ -362,8 +361,8 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
 
   @Override
   public String toString() {
-    StringBuffer s = new StringBuffer();
-    return s.append("PartitionAttributes@").append(System.identityHashCode(this))
+    StringBuilder sb = new StringBuilder();
+    return sb.append("PartitionAttributes@").append(System.identityHashCode(this))
         .append("[redundantCopies=").append(getRedundantCopies()).append(";localMaxMemory=")
         .append(getLocalMaxMemory()).append(";totalMaxMemory=").append(this.totalMaxMemory)
         .append(";totalNumBuckets=").append(this.totalNumBuckets).append(";partitionResolver=")
@@ -378,6 +377,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
    * @throws IllegalStateException if off-heap and the actual value is not yet known (because the
    *         DistributedSystem has not yet been created)
    */
+  @Override
   public void toData(DataOutput out) throws IOException {
     checkLocalMaxMemoryExists();
     out.writeInt(this.redundancy);
@@ -393,6 +393,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     DataSerializer.writeObject(this.fixedPAttrs, out);
   }
 
+  @Override
   public void fromData(DataInput in) throws IOException, ClassNotFoundException {
     this.redundancy = in.readInt();
     this.totalMaxMemory = in.readLong();
@@ -436,8 +437,6 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
         || this.totalMaxMemory != other.getTotalMaxMemory()
         || this.startupRecoveryDelay != other.getStartupRecoveryDelay()
         || this.recoveryDelay != other.getRecoveryDelay()
-        // || ! this.localProperties.equals(other.getLocalProperties())
-        // || ! this.globalProperties.equals(other.getGlobalProperties())
         || ((this.partitionResolver == null) != (other.getPartitionResolver() == null))
         || (this.partitionResolver != null
             && !this.partitionResolver.equals(other.getPartitionResolver()))
@@ -447,7 +446,6 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
         || ((this.fixedPAttrs == null) != (other.getFixedPartitionAttributes() == null))
         || (this.fixedPAttrs != null
             && !this.fixedPAttrs.equals(other.getFixedPartitionAttributes()))) {
-      // throw new RuntimeException("this="+this.toString() + " other=" + other.toString());
       return false;
     }
 
@@ -457,12 +455,12 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     if (otherPListeners.length != thisPListeners.length) {
       return false;
     }
-    Set<String> otherListenerClassName = new HashSet<String>();
+    Set<String> otherListenerClassName = new HashSet<>();
     for (int i = 0; i < otherPListeners.length; i++) {
       PartitionListener listener = otherPListeners[i];
       otherListenerClassName.add(listener.getClass().getName());
     }
-    Set<String> thisListenerClassName = new HashSet<String>();
+    Set<String> thisListenerClassName = new HashSet<>();
     for (int i = 0; i < thisPListeners.length; i++) {
       PartitionListener listener = thisPListeners[i];
       thisListenerClassName.add(listener.getClass().getName());
@@ -517,7 +515,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     if (propVal != null) {
       try {
         setTotalMaxMemory(Integer.parseInt(propVal));
-      } catch (RuntimeException e) {
+      } catch (RuntimeException ignore) {
         this.totalMaxMemory = PartitionAttributesFactory.GLOBAL_MAX_MEMORY_DEFAULT;
       }
     }
@@ -525,7 +523,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     if (propVal != null) {
       try {
         this.setTotalNumBuckets(Integer.parseInt(propVal));
-      } catch (RuntimeException e) {
+      } catch (RuntimeException ignore) {
         this.totalNumBuckets = PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;
       }
     }
@@ -533,7 +531,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
 
   public void addFixedPartitionAttributes(FixedPartitionAttributes fpa) {
     if (this.fixedPAttrs == null) {
-      this.fixedPAttrs = new ArrayList<FixedPartitionAttributesImpl>(1);
+      this.fixedPAttrs = new ArrayList<>(1);
       this.fixedPAttrs.add((FixedPartitionAttributesImpl) fpa);
       this.hasFixedPAttrs = true;
     } else {
@@ -562,12 +560,12 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     if ((this.totalNumBuckets <= 0)) {
       throw new IllegalStateException(
           LocalizedStrings.PartitionAttributesImpl_TOTALNUMBICKETS_0_IS_AN_ILLEGAL_VALUE_PLEASE_CHOOSE_A_VALUE_GREATER_THAN_0
-              .toLocalizedString(Integer.valueOf(this.totalNumBuckets)));
+              .toLocalizedString(this.totalNumBuckets));
     }
     if ((this.redundancy < 0) || (this.redundancy >= 4)) {
       throw new IllegalStateException(
           LocalizedStrings.PartitionAttributesImpl_REDUNDANTCOPIES_0_IS_AN_ILLEGAL_VALUE_PLEASE_CHOOSE_A_VALUE_BETWEEN_0_AND_3
-              .toLocalizedString(Integer.valueOf(this.redundancy)));
+              .toLocalizedString(this.redundancy));
     }
     for (Iterator it = this.getLocalProperties().keySet().iterator(); it.hasNext();) {
       String propName = (String) it.next();
@@ -660,29 +658,27 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
     Cache cache = GemFireCacheImpl.getInstance();
     if (cache != null) {
       Region<?, ?> region = cache.getRegion(this.colocatedRegionName);
-      {
-        if (region == null) {
-          throw new IllegalStateException(
-              LocalizedStrings.PartitionAttributesImpl_REGION_SPECIFIED_IN_COLOCATEDWITH_IS_NOT_PRESENT_IT_SHOULD_BE_CREATED_BEFORE_SETTING_COLOCATED_WITH_THIS_REGION
-                  .toLocalizedString());
-        }
-        if (!(region instanceof PartitionedRegion)) {
-          throw new IllegalStateException(
-              LocalizedStrings.PartitionAttributesImpl_SETTING_THE_ATTRIBUTE_COLOCATEDWITH_IS_SUPPORTED_ONLY_FOR_PARTITIONEDREGIONS
-                  .toLocalizedString());
-        }
-        PartitionedRegion colocatedRegion = (PartitionedRegion) region;
-        if (this.getTotalNumBuckets() != colocatedRegion.getPartitionAttributes()
-            .getTotalNumBuckets()) {
-          throw new IllegalStateException(
-              LocalizedStrings.PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_TOTALNUMBUCKETS_SHOULD_BE_SAME_AS_TOTALNUMBUCKETS_OF_COLOCATED_PARTITIONEDREGION
-                  .toLocalizedString());
-        }
-        if (this.getRedundancy() != colocatedRegion.getPartitionAttributes().getRedundantCopies()) {
-          throw new IllegalStateException(
-              LocalizedStrings.PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_REDUNDANCY_SHOULD_BE_SAME_AS_THE_REDUNDANCY_OF_COLOCATED_PARTITIONEDREGION
-                  .toLocalizedString());
-        }
+      if (region == null) {
+        throw new IllegalStateException(
+            LocalizedStrings.PartitionAttributesImpl_REGION_SPECIFIED_IN_COLOCATEDWITH_IS_NOT_PRESENT_IT_SHOULD_BE_CREATED_BEFORE_SETTING_COLOCATED_WITH_THIS_REGION
+                .toLocalizedString());
+      }
+      if (!(region instanceof PartitionedRegion)) {
+        throw new IllegalStateException(
+            LocalizedStrings.PartitionAttributesImpl_SETTING_THE_ATTRIBUTE_COLOCATEDWITH_IS_SUPPORTED_ONLY_FOR_PARTITIONEDREGIONS
+                .toLocalizedString());
+      }
+      PartitionedRegion colocatedRegion = (PartitionedRegion) region;
+      if (this.getTotalNumBuckets() != colocatedRegion.getPartitionAttributes()
+          .getTotalNumBuckets()) {
+        throw new IllegalStateException(
+            LocalizedStrings.PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_TOTALNUMBUCKETS_SHOULD_BE_SAME_AS_TOTALNUMBUCKETS_OF_COLOCATED_PARTITIONEDREGION
+                .toLocalizedString());
+      }
+      if (this.getRedundancy() != colocatedRegion.getPartitionAttributes().getRedundantCopies()) {
+        throw new IllegalStateException(
+            LocalizedStrings.PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_REDUNDANCY_SHOULD_BE_SAME_AS_THE_REDUNDANCY_OF_COLOCATED_PARTITIONEDREGION
+                .toLocalizedString());
       }
     }
   }
@@ -732,7 +728,7 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
   }
 
   @SuppressWarnings("unchecked")
-  public void setAll(@SuppressWarnings("rawtypes") PartitionAttributes pa) {
+  public void setAll(PartitionAttributes pa) {
     setRedundantCopies(pa.getRedundantCopies());
     setLocalProperties(pa.getLocalProperties());
     setGlobalProperties(pa.getGlobalProperties());
@@ -770,9 +766,8 @@ public class PartitionAttributesImpl implements PartitionAttributes, Cloneable,
           OffHeapStorage.parseOffHeapMemorySize(testAvailableOffHeapMemory) / (1024 * 1024);
     } else if (InternalDistributedSystem.getAnyInstance() == null) {
       this.localMaxMemoryExists = false;
-      return OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER; // fix 52033: return non-negative, non-zero
-                                                    // temporary placeholder for
-                                                    // offHeapLocalMaxMemory
+      // fix 52033: return non-negative, non-zero temporary placeholder for offHeapLocalMaxMemory
+      return OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER;
     } else {
       String offHeapSizeConfigValue =
           InternalDistributedSystem.getAnyInstance().getOriginalConfig().getOffHeapMemorySize();
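
The getLocalMaxMemory hunk earlier in this file keeps a lazy-computation pattern: for off-heap
regions the real value may not be known until the DistributedSystem exists, so a freshly computed
value is cached only once localMaxMemoryExists flips to true, and a non-zero placeholder is
returned until then (fix 52033). A stripped-down sketch of that pattern; the computation body is
a stub, not Geode's:

    public class LazyValueDemo {
      private boolean localMaxMemoryExists;
      private int localMaxMemory = 1; // placeholder until the real value is known

      int getLocalMaxMemory() {
        if (!this.localMaxMemoryExists) {
          int value = compute(); // may or may not discover the real value
          if (this.localMaxMemoryExists) {
            // real value now exists so cache it before returning
            this.localMaxMemory = value;
          }
        }
        return this.localMaxMemory;
      }

      private int compute() {
        // Stub: the diff derives this from off-heap-memory-size once the
        // DistributedSystem is up; here we just pretend it became available.
        this.localMaxMemoryExists = true;
        return 512;
      }
    }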