You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@hbase.apache.org by "Andrew Purtell (JIRA)" <ji...@apache.org> on 2009/10/18 16:52:31 UTC

[jira] Created: (HBASE-1916) FindBugs and javac warnings cleanup

FindBugs and javac warnings cleanup
-----------------------------------

                 Key: HBASE-1916
                 URL: https://issues.apache.org/jira/browse/HBASE-1916
             Project: Hadoop HBase
          Issue Type: Bug
            Reporter: Andrew Purtell
            Assignee: Andrew Purtell
            Priority: Minor
             Fix For: 0.20.2, 0.21.0
         Attachments: omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch

FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 

All tests pass with patches applied.

There are a few real bugs fixed:

Would-be null dereference (fall through from null test):
{code}
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
@@ -1222,12 +1222,13 @@
     // queue at time iterator was taken out.  Apparently goes from oldest.
     for (ToDoEntry e: this.toDo) {
       HMsg msg = e.msg;
-      if (msg == null) {
+      if (msg != null) {
+        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
+          addProcessingMessage(msg.getRegionInfo());
+        }
+      } else {
         LOG.warn("Message is empty: " + e);
       }
-      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
-        addProcessingMessage(e.msg.getRegionInfo());
-      }
     }
   }
{code}

Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
{code}
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
@@ -1900,7 +1902,8 @@
         null: results.toArray(new Result[0]);
     } catch (Throwable t) {
       if (t instanceof NotServingRegionException) {
-        this.scanners.remove(scannerId);
+        String scannerName = String.valueOf(scannerId);
+        this.scanners.remove(scannerName);
       }
       throw convertThrowableToIOE(cleanup(t));
     }
{code}

Invalid equality test:
{code}
Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
===================================================================
@@ -301,11 +301,15 @@
       // should be regions.  Then in each region, should only be family
       // directories.  Under each of these, should be one file only.
       Path d = tableDirs[i].getPath();
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
+      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
+        continue;
+      }
       FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
       for (int j = 0; j < regionDirs.length; j++) {
         Path dd = regionDirs[j].getPath();
-        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
+        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
+          continue;
+        }
         // Else its a region name.  Now look in region for families.
         FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
         for (int k = 0; k < familyDirs.length; k++) {
@@ -360,11 +364,15 @@
       // only be family directories.  Under each of these, should be a mapfile
       // and info directory and in these only one file.
       Path d = tableDirs[i].getPath();
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
+      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
+        continue;
+      }
       FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
       for (int j = 0; j < regionDirs.length; j++) {
         Path dd = regionDirs[j].getPath();
-        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
+        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
+          continue;
+        }
         // Else its a region name.  Now look in region for families.
         FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
         for (int k = 0; k < familyDirs.length; k++) {
{code}

Minor race:
{code}
Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
===================================================================
@@ -197,7 +197,7 @@
      * Get this watcher's ZKW, instanciate it if necessary.
      * @return ZKW
      */
-    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
+    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
       if(zooKeeperWrapper == null) {
         zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
       } 
{code}

Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
{code}
Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
===================================================================
@@ -323,6 +323,7 @@
             
             if (tryMaster.isMasterRunning()) {
               this.master = tryMaster;
+              this.masterLock.notifyAll();
               break;
             }
             
@@ -340,7 +341,7 @@
 
           // Cannot connect to master or it is not running. Sleep & retry
           try {
-            Thread.sleep(getPauseTime(tries));
+            this.masterLock.wait(getPauseTime(tries));
           } catch (InterruptedException e) {
             // continue
           }
{code}

'column' and 'newColumn' may be null along some code paths. Protect against possible null dereference and make the static analysis tool happy.
{code}
Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
===================================================================
@@ -165,74 +165,80 @@
       this.newColumn = newColumns.get(newIndex);
       return MatchCode.INCLUDE;
     }
-    
-    
-    // There are new and old, figure which to check first
-    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+
+    if (column != null && newColumn != null) {
+      // There are new and old, figure which to check first
+      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
         column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
         newColumn.getLength());
         
-    // Old is smaller than new, compare against old
-    if(ret <= -1) {
-      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+      // Old is smaller than new, compare against old
+      if(ret <= -1) {
+        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
           column.getLength(), bytes, offset, length);
       
+        // Same column
+        if(ret == 0) {
+          if(column.increment() > this.maxVersions) {
+            return MatchCode.SKIP;
+          }
+          return MatchCode.INCLUDE;
+        }
+      
+        // Specified column is bigger than current column
+        // Move down current column and check again
+        if(ret <= -1) {
+          if(++index == columns.size()) {
+            this.column = null;
+          } else {
+            this.column = columns.get(index);
+          }
+          return checkColumn(bytes, offset, length);
+        }
+      
+        // ret >= 1
+        // Specified column is smaller than current column
+        // Nothing to match against, add to new and include
+        newColumns.add(new ColumnCount(bytes, offset, length, 1));
+        return MatchCode.INCLUDE;
+      }
+    }
+
+    if (newColumn != null) {
+      // Cannot be equal, so ret >= 1
+      // New is smaller than old, compare against new
+      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
+        newColumn.getLength(), bytes, offset, length);
+    
       // Same column
       if(ret == 0) {
-        if(column.increment() > this.maxVersions) {
+        if(newColumn.increment() > this.maxVersions) {
           return MatchCode.SKIP;
         }
         return MatchCode.INCLUDE;
       }
-      
+    
       // Specified column is bigger than current column
       // Move down current column and check again
       if(ret <= -1) {
-        if(++index == columns.size()) {
-          this.column = null;
+        if(++newIndex == newColumns.size()) {
+          this.newColumn = null;
         } else {
-          this.column = columns.get(index);
+          this.newColumn = newColumns.get(newIndex);
         }
         return checkColumn(bytes, offset, length);
       }
-      
+    
       // ret >= 1
       // Specified column is smaller than current column
       // Nothing to match against, add to new and include
       newColumns.add(new ColumnCount(bytes, offset, length, 1));
       return MatchCode.INCLUDE;
     }
-    
-    // Cannot be equal, so ret >= 1
-    // New is smaller than old, compare against new
-    
-    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
-        newColumn.getLength(), bytes, offset, length);
-    
-    // Same column
-    if(ret == 0) {
-      if(newColumn.increment() > this.maxVersions) {
-        return MatchCode.SKIP;
-      }
-      return MatchCode.INCLUDE;
-    }
-    
-    // Specified column is bigger than current column
-    // Move down current column and check again
-    if(ret <= -1) {
-      if(++newIndex == newColumns.size()) {
-        this.newColumn = null;
-      } else {
-        this.newColumn = newColumns.get(newIndex);
-      }
-      return checkColumn(bytes, offset, length);
-    }
-    
-    // ret >= 1
-    // Specified column is smaller than current column
-    // Nothing to match against, add to new and include
+
+    // No match happened, add to new and include
     newColumns.add(new ColumnCount(bytes, offset, length, 1));
-    return MatchCode.INCLUDE;
+    return MatchCode.INCLUDE;    
   }
{code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Updated: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Andrew Purtell updated HBASE-1916:
----------------------------------

    Attachment: HBASE-1916-branch-shouldfix.patch

Committed to trunk.

There are some changes that are more than cosmetic or warnings fixes which should go in on branch. The volume of these is lower and the patch is manageable for review. Attached as HBASE-1916-branch-shouldfix.patch. 

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' may be null along some code paths. Protect against possible null dereference and make the static analysis tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "stack (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767203#action_12767203 ] 

stack commented on HBASE-1916:
------------------------------

Yeah, Serializable is just a marker interface but we don't do any java serializing so thanks for backing it out.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' may be null along some code paths. Protect against possible null dereference and make the static analysis tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "ryan rawson (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767182#action_12767182 ] 

ryan rawson commented on HBASE-1916:
------------------------------------

I wanted to code review this before it went in. There are lots of changes to fairly core code, and we cannot rely on the unit tests to protect us.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767194#action_12767194 ] 

Andrew Purtell commented on HBASE-1916:
---------------------------------------

Also, for the record, in my opinion — with the exception of the changes called out in the "shouldfix" patch, applicable equally to trunk and branch — there's minimal merit to the changes either way. For me this issue, and maybe follow-up issues of its kind, are evidence I can point to whenever I hear "but, your code is full of bugs." No, they are warnings, and they have been reviewed and in some cases addressed.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Updated: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Andrew Purtell updated HBASE-1916:
----------------------------------

    Status: Patch Available  (was: Open)

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "stack (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767148#action_12767148 ] 

stack commented on HBASE-1916:
------------------------------

+1 on applying to trunk.

Do you think we should apply this on the branch Andrew?  While each change in itself is small risk, the very volume might be risky.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Updated: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Andrew Purtell updated HBASE-1916:
----------------------------------

      Resolution: Fixed
    Hadoop Flags: [Reviewed]
          Status: Resolved  (was: Patch Available)

Committed short patch to branch. All tests pass locally.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767199#action_12767199 ] 

Andrew Purtell commented on HBASE-1916:
---------------------------------------

Sounds good to me. I backed out the 'Serializable' hunks as r826580.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Updated: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Andrew Purtell updated HBASE-1916:
----------------------------------

    Attachment: omnibus-cleanup-branch.patch
                omnibus-cleanup-trunk.patch

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "ryan rawson (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767197#action_12767197 ] 

ryan rawson commented on HBASE-1916:
------------------------------------

I think there is harm in incorrectly suggesting that those classes are serializable and that people should be serializing them. Considering we are not using that mechanism in our code, perhaps it would be better to remove all Serialization references instead? I don't like cargo cult programming.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767224#action_12767224 ] 

Andrew Purtell commented on HBASE-1916:
---------------------------------------

Not all tests on branch succeed currently. See HBASE-1917. Holding off on committing this to branch until all tests are working again.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "stack (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767204#action_12767204 ] 

stack commented on HBASE-1916:
------------------------------

I took a look at the short patch for branch.  +1 on commit if all tests pass.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "Andrew Purtell (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767191#action_12767191 ] 

Andrew Purtell commented on HBASE-1916:
---------------------------------------

Part of this was about reducing a raft of javac warnings and FindBugs warnings to find real problems. Reducing the volume of warnings was also a goal, so I tried to set the bar just above useless/pointless. Doing that is really rote and time-consuming, so I did it in a systematic manner without respect to generated code or otherwise; in some cases the result may drop below the bar, but harmlessly (e.g., removed useless includes).

Regarding Thrift, actually the way the generated code boxed primitives was lower performing than the changes that went in. On the other hand, modifying generated code has only a temporary result. I'd like to see Thrift come out of core so all of the warnings noise it produces will come out also. 

'Serializable' is just an interface change with no runtime impact, as I understand it. Correct me if I'm wrong. So these changes contribute to a reduction in warnings without any harm.

> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' are not guaranteed to not be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


[jira] Commented: (HBASE-1916) FindBugs and javac warnings cleanup

Posted by "ryan rawson (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/HBASE-1916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12767185#action_12767185 ] 

ryan rawson commented on HBASE-1916:
------------------------------------

By the way, you are aware there are edits to Thrift-generated code, right?  Also, needlessly turning objects into 'Serializable', etc., is not making me happy here.



> FindBugs and javac warnings cleanup
> -----------------------------------
>
>                 Key: HBASE-1916
>                 URL: https://issues.apache.org/jira/browse/HBASE-1916
>             Project: Hadoop HBase
>          Issue Type: Bug
>            Reporter: Andrew Purtell
>            Assignee: Andrew Purtell
>            Priority: Minor
>             Fix For: 0.20.2, 0.21.0
>
>         Attachments: HBASE-1916-branch-shouldfix.patch, omnibus-cleanup-branch.patch, omnibus-cleanup-trunk.patch
>
>
> FindBugs was run on the 0.20 branch code recently and produced ~400 warnings, which were presented to me in a meeting. So I installed the Eclipse FindBugs plugin, analyzed both trunk and 0.20 branch, and systematically went through and addressed the warnings. About a quarter of them were incorrect (spurious, pointless, etc.). Of the remainder, most were stylistic. A few were real bugs. Attached are big patches against trunk and branch which roll up all of the changes. They are too numerous to separate out and mostly do not warrant special attention. 
> All tests pass with patches applied.
> There are a few real bugs fixed:
> Would-be null dereference (fall through from null test):
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1222,12 +1222,13 @@
>      // queue at time iterator was taken out.  Apparently goes from oldest.
>      for (ToDoEntry e: this.toDo) {
>        HMsg msg = e.msg;
> -      if (msg == null) {
> +      if (msg != null) {
> +        if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> +          addProcessingMessage(msg.getRegionInfo());
> +        }
> +      } else {
>          LOG.warn("Message is empty: " + e);
>        }
> -      if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
> -        addProcessingMessage(e.msg.getRegionInfo());
> -      }
>      }
>    }
> {code}
> Invalid attempt to remove scanner from string-keyed map using an integer key instead of scanner name:
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
> ===================================================================
> @@ -1900,7 +1902,8 @@
>          null: results.toArray(new Result[0]);
>      } catch (Throwable t) {
>        if (t instanceof NotServingRegionException) {
> -        this.scanners.remove(scannerId);
> +        String scannerName = String.valueOf(scannerId);
> +        this.scanners.remove(scannerName);
>        }
>        throw convertThrowableToIOE(cleanup(t));
>      }
> {code}
> Invalid equality test:
> {code}
> Index: src/java/org/apache/hadoop/hbase/util/FSUtils.java
> ===================================================================
> @@ -301,11 +301,15 @@
>        // should be regions.  Then in each region, should only be family
>        // directories.  Under each of these, should be one file only.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> @@ -360,11 +364,15 @@
>        // only be family directories.  Under each of these, should be a mapfile
>        // and info directory and in these only one file.
>        Path d = tableDirs[i].getPath();
> -      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
> +      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
> +        continue;
> +      }
>        FileStatus [] regionDirs = fs.listStatus(d, new DirFilter(fs));
>        for (int j = 0; j < regionDirs.length; j++) {
>          Path dd = regionDirs[j].getPath();
> -        if (dd.equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
> +        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
> +          continue;
> +        }
>          // Else its a region name.  Now look in region for families.
>          FileStatus [] familyDirs = fs.listStatus(dd, new DirFilter(fs));
>          for (int k = 0; k < familyDirs.length; k++) {
> {code}
> Minor race:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -197,7 +197,7 @@
>       * Get this watcher's ZKW, instanciate it if necessary.
>       * @return ZKW
>       */
> -    public ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
> +    public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
>        if(zooKeeperWrapper == null) {
>          zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
>        } 
> {code}
> Sleep within lock. Rewritten to sleep using lock instead. Also, notify when sleep is no longer necessary. Saves 20-30 seconds in TestZooKeeper:
> {code}
> Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
> ===================================================================
> @@ -323,6 +323,7 @@
>              
>              if (tryMaster.isMasterRunning()) {
>                this.master = tryMaster;
> +              this.masterLock.notifyAll();
>                break;
>              }
>              
> @@ -340,7 +341,7 @@
>  
>            // Cannot connect to master or it is not running. Sleep & retry
>            try {
> -            Thread.sleep(getPauseTime(tries));
> +            this.masterLock.wait(getPauseTime(tries));
>            } catch (InterruptedException e) {
>              // continue
>            }
> {code}
> 'column' and 'newColumn' may be null along some code paths. Protect against possible null dereference and make the static tool happy.
> {code}
> Index: src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
> ===================================================================
> @@ -165,74 +165,80 @@
>        this.newColumn = newColumns.get(newIndex);
>        return MatchCode.INCLUDE;
>      }
> -    
> -    
> -    // There are new and old, figure which to check first
> -    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +
> +    if (column != null && newColumn != null) {
> +      // There are new and old, figure which to check first
> +      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>          column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
>          newColumn.getLength());
>          
> -    // Old is smaller than new, compare against old
> -    if(ret <= -1) {
> -      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
> +      // Old is smaller than new, compare against old
> +      if(ret <= -1) {
> +        ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
>            column.getLength(), bytes, offset, length);
>        
> +        // Same column
> +        if(ret == 0) {
> +          if(column.increment() > this.maxVersions) {
> +            return MatchCode.SKIP;
> +          }
> +          return MatchCode.INCLUDE;
> +        }
> +      
> +        // Specified column is bigger than current column
> +        // Move down current column and check again
> +        if(ret <= -1) {
> +          if(++index == columns.size()) {
> +            this.column = null;
> +          } else {
> +            this.column = columns.get(index);
> +          }
> +          return checkColumn(bytes, offset, length);
> +        }
> +      
> +        // ret >= 1
> +        // Specified column is smaller than current column
> +        // Nothing to match against, add to new and include
> +        newColumns.add(new ColumnCount(bytes, offset, length, 1));
> +        return MatchCode.INCLUDE;
> +      }
> +    }
> +
> +    if (newColumn != null) {
> +      // Cannot be equal, so ret >= 1
> +      // New is smaller than old, compare against new
> +      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> +        newColumn.getLength(), bytes, offset, length);
> +    
>        // Same column
>        if(ret == 0) {
> -        if(column.increment() > this.maxVersions) {
> +        if(newColumn.increment() > this.maxVersions) {
>            return MatchCode.SKIP;
>          }
>          return MatchCode.INCLUDE;
>        }
> -      
> +    
>        // Specified column is bigger than current column
>        // Move down current column and check again
>        if(ret <= -1) {
> -        if(++index == columns.size()) {
> -          this.column = null;
> +        if(++newIndex == newColumns.size()) {
> +          this.newColumn = null;
>          } else {
> -          this.column = columns.get(index);
> +          this.newColumn = newColumns.get(newIndex);
>          }
>          return checkColumn(bytes, offset, length);
>        }
> -      
> +    
>        // ret >= 1
>        // Specified column is smaller than current column
>        // Nothing to match against, add to new and include
>        newColumns.add(new ColumnCount(bytes, offset, length, 1));
>        return MatchCode.INCLUDE;
>      }
> -    
> -    // Cannot be equal, so ret >= 1
> -    // New is smaller than old, compare against new
> -    
> -    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
> -        newColumn.getLength(), bytes, offset, length);
> -    
> -    // Same column
> -    if(ret == 0) {
> -      if(newColumn.increment() > this.maxVersions) {
> -        return MatchCode.SKIP;
> -      }
> -      return MatchCode.INCLUDE;
> -    }
> -    
> -    // Specified column is bigger than current column
> -    // Move down current column and check again
> -    if(ret <= -1) {
> -      if(++newIndex == newColumns.size()) {
> -        this.newColumn = null;
> -      } else {
> -        this.newColumn = newColumns.get(newIndex);
> -      }
> -      return checkColumn(bytes, offset, length);
> -    }
> -    
> -    // ret >= 1
> -    // Specified column is smaller than current column
> -    // Nothing to match against, add to new and include
> +
> +    // No match happened, add to new and include
>      newColumns.add(new ColumnCount(bytes, offset, length, 1));
> -    return MatchCode.INCLUDE;
> +    return MatchCode.INCLUDE;    
>    }
> {code}

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.